/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_counter.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>

#include "sched_cpupri.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
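
/*
 * Worked example, assuming the usual values MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140: NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 and
 * NICE_TO_PRIO(19) == 139, so the whole nice range lands inside
 * [MAX_RT_PRIO .. MAX_PRIO-1], and PRIO_TO_NICE() exactly inverts it.
 */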

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
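
/*
 * Under the same assumption (MAX_RT_PRIO == 100): a nice -20 task has
 * static_prio 100 and thus USER_PRIO 0, nice 0 maps to 20, nice +19 to
 * 39, and MAX_USER_PRIO evaluates to 40 distinct levels.
 */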

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE           (100 * HZ / 1000)
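
/*
 * Illustration, assuming HZ == 1000: NSEC_PER_SEC / HZ == 1000000, so
 * NS_TO_JIFFIES(5000000) == 5 ticks, and DEF_TIMESLICE evaluates to
 * 100 * 1000 / 1000 == 100 jiffies - the intended 100 msecs. With
 * HZ == 250 the same expression yields 25 jiffies, still 100 msecs.
 */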

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF     ((u64)~0ULL)

#ifdef CONFIG_SMP

static void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
 * Since cpu_power is a 'constant', we can use a reciprocal divide.
 */
static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
{
        return reciprocal_divide(load, sg->reciprocal_cpu_power);
}
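
/*
 * A minimal sketch of the reciprocal-divide trick relied on above (the
 * real helpers live in <linux/reciprocal_div.h>; these illustrative
 * copies are not part of that API). Division by a slowly-changing value
 * b becomes a multiply and a shift once r has been precomputed:
 */
#if 0   /* illustrative only, not compiled */
static u32 example_reciprocal_value(u32 b)
{
        /* ceil(2^32 / b); the real code uses do_div() for the 64-bit op */
        return (u32)(((1ULL << 32) + b - 1) / b);
}

static u32 example_reciprocal_divide(u32 a, u32 r)
{
        return (u32)(((u64)a * r) >> 32);       /* approximates a / b */
}
#endif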

/*
 * Each time a sched group cpu_power is changed,
 * we must compute its reciprocal value
 */
static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
{
        sg->__cpu_power += val;
        sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
}
#endif

static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};
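
/*
 * The bitmap mirrors the queue array: bit i is set iff queue[i] is
 * non-empty, so finding the highest-priority runnable RT task costs one
 * find-first-bit plus a list dequeue. A hedged sketch of the lookup
 * (the real dequeue path lives in sched_rt.c):
 */
#if 0   /* illustrative only, not compiled */
static struct list_head *pick_highest_rt_queue(struct rt_prio_array *array)
{
        int idx = sched_find_first_bit(array->bitmap);

        if (idx >= MAX_RT_PRIO)
                return NULL;                    /* only the delimiter bit is set */
        return array->queue[idx].next;          /* head of the highest-prio list */
}
#endif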

struct rt_bandwidth {
        /* nests inside the rq lock: */
        spinlock_t              rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
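
/*
 * Note on the loop above: hrtimer_forward() advances the timer by whole
 * rt_period steps and returns how many periods were skipped; each missed
 * period is accounted via do_sched_rt_period_timer(), and the timer is
 * only re-armed while that accounting reports the bandwidth as non-idle.
 */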

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        ktime_t now;

        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;

                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;

                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

                soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
                hard = hrtimer_get_expires(&rt_b->rt_period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
}
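
/*
 * Note on start_rt_bandwidth(): the soft/hard expiry pair read back
 * above preserves the timer's slack range when re-arming it, and
 * HRTIMER_MODE_ABS_PINNED keeps the period timer on the current CPU.
 * The loop re-checks hrtimer_active() because the callback can fire
 * and re-arm the timer concurrently.
 */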

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_GROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
#ifdef CONFIG_CGROUP_SCHED
        struct cgroup_subsys_state css;
#endif

#ifdef CONFIG_USER_SCHED
        uid_t uid;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity **rt_se;
        struct rt_rq **rt_rq;

        struct rt_bandwidth rt_bandwidth;
#endif

        struct rcu_head rcu;
        struct list_head list;

        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;
};

#ifdef CONFIG_USER_SCHED

/* Helper function to pass uid information to create_sched_user() */
void set_tg_uid(struct user_struct *user)
{
        user->tg->uid = user->uid;
}

/*
 * Root task group.
 *      Every UID task group (including init_task_group aka UID-0) will
 *      be a child of this group.
 */
struct task_group root_task_group;

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
#endif /* CONFIG_USER_SCHED */

/* task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
 */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
        return list_empty(&root_task_group.children);
}
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD   (2*NICE_0_LOAD)
#else /* !CONFIG_USER_SCHED */
# define INIT_TASK_GROUP_LOAD   NICE_0_LOAD
#endif /* CONFIG_USER_SCHED */

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES      2
#define MAX_SHARES      (1UL << 18)
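
/*
 * Concretely: MIN_SHARES clamps a group's shares to at least 2, keeping
 * clear of the 0/1 cases called out above, while MAX_SHARES caps them
 * at 1UL << 18 == 262144 - well above the default of 1024.
 */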

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 *      Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
{
        struct task_group *tg;

#ifdef CONFIG_USER_SCHED
        rcu_read_lock();
        tg = __task_cred(p)->user->tg;
        rcu_read_unlock();
#elif defined(CONFIG_CGROUP_SCHED)
        tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
                                struct task_group, css);
#else
        tg = &init_task_group;
#endif
        return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}
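
/*
 * Taken together, task_group() and set_task_rq() are what "placing a
 * task in a group" means here: the former resolves p's group via its
 * UID or cgroup, and the latter repoints p's per-class scheduling
 * entities at that group's per-CPU runqueues for the given cpu.
 */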

#else

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
        return 1;
}
#endif

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif  /* CONFIG_GROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        u64 exec_clock;
        u64 min_vruntime;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;

        struct list_head tasks;
        struct list_head *balance_iterator;

        /*
         * 'curr' points to the currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e. when none are currently running).
         */
        struct sched_entity *curr, *next, *last;

        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
         * in a hierarchy). Non-leaf cfs_rqs hold other higher schedulable
         * entities (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
         * This list is used during load balance.
         */
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
        /*
         * the part of load.weight contributed by tasks
         */
        unsigned long task_weight;

        /*
         *   h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long h_load;

        /*
         * this cpu's part of tg->shares
         */
        unsigned long shares;

        /*
         * load.weight at the time we set shares
         */
        unsigned long rq_weight;
#endif
#endif
};
  401. /* Real-Time classes' related field in a runqueue: */
  402. struct rt_rq {
  403. struct rt_prio_array active;
  404. unsigned long rt_nr_running;
  405. #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
  406. struct {
  407. int curr; /* highest queued rt task prio */
  408. #ifdef CONFIG_SMP
  409. int next; /* next highest */
  410. #endif
  411. } highest_prio;
  412. #endif
  413. #ifdef CONFIG_SMP
  414. unsigned long rt_nr_migratory;
  415. unsigned long rt_nr_total;
  416. int overloaded;
  417. struct plist_head pushable_tasks;
  418. #endif
  419. int rt_throttled;
  420. u64 rt_time;
  421. u64 rt_runtime;
  422. /* Nests inside the rq lock: */
  423. spinlock_t rt_runtime_lock;
  424. #ifdef CONFIG_RT_GROUP_SCHED
  425. unsigned long rt_nr_boosted;
  426. struct rq *rq;
  427. struct list_head leaf_rt_rq_list;
  428. struct task_group *tg;
  429. struct sched_rt_entity *rt_se;
  430. #endif
  431. };
  432. #ifdef CONFIG_SMP
  433. /*
  434. * We add the notion of a root-domain which will be used to define per-domain
  435. * variables. Each exclusive cpuset essentially defines an island domain by
  436. * fully partitioning the member cpus from any other cpuset. Whenever a new
  437. * exclusive cpuset is created, we also create and attach a new root-domain
  438. * object.
  439. *
  440. */
  441. struct root_domain {
  442. atomic_t refcount;
  443. cpumask_var_t span;
  444. cpumask_var_t online;
  445. /*
  446. * The "RT overload" flag: it gets set if a CPU has more than
  447. * one runnable RT task.
  448. */
  449. cpumask_var_t rto_mask;
  450. atomic_t rto_count;
  451. #ifdef CONFIG_SMP
  452. struct cpupri cpupri;
  453. #endif
  454. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  455. /*
  456. * Preferred wake-up cpu, nominated by sched_mc balancing, to be used
  457. * when most cpus in the system are idle, indicating very low overall
  458. * utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2).
  459. */
  460. unsigned int sched_mc_preferred_wakeup_cpu;
  461. #endif
  462. };
  463. /*
  464. * By default the system creates a single root-domain with all cpus as
  465. * members (mimicking the global state we have today).
  466. */
  467. static struct root_domain def_root_domain;
  468. #endif
  469. /*
  470. * This is the main, per-CPU runqueue data structure.
  471. *
  472. * Locking rule: code that wants to lock multiple runqueues (such as
  473. * the load balancing or the thread migration code) must acquire the
  474. * locks in ascending &runqueue order.
  475. */
  476. struct rq {
  477. /* runqueue lock: */
  478. spinlock_t lock;
  479. /*
  480. * nr_running and cpu_load should be in the same cacheline because
  481. * remote CPUs use both these fields when doing load calculation.
  482. */
  483. unsigned long nr_running;
  484. #define CPU_LOAD_IDX_MAX 5
  485. unsigned long cpu_load[CPU_LOAD_IDX_MAX];
  486. #ifdef CONFIG_NO_HZ
  487. unsigned long last_tick_seen;
  488. unsigned char in_nohz_recently;
  489. #endif
  490. /* capture load from *all* tasks on this cpu: */
  491. struct load_weight load;
  492. unsigned long nr_load_updates;
  493. u64 nr_switches;
  494. u64 nr_migrations_in;
  495. struct cfs_rq cfs;
  496. struct rt_rq rt;
  497. #ifdef CONFIG_FAIR_GROUP_SCHED
  498. /* list of leaf cfs_rq on this cpu: */
  499. struct list_head leaf_cfs_rq_list;
  500. #endif
  501. #ifdef CONFIG_RT_GROUP_SCHED
  502. struct list_head leaf_rt_rq_list;
  503. #endif
  504. /*
  505. * This is part of a global counter where only the total sum
  506. * over all CPUs matters. A task can increase this counter on
  507. * one CPU and if it got migrated afterwards it may decrease
  508. * it on another CPU. Always updated under the runqueue lock:
  509. */
  510. unsigned long nr_uninterruptible;
  511. struct task_struct *curr, *idle;
  512. unsigned long next_balance;
  513. struct mm_struct *prev_mm;
  514. u64 clock;
  515. atomic_t nr_iowait;
  516. #ifdef CONFIG_SMP
  517. struct root_domain *rd;
  518. struct sched_domain *sd;
  519. unsigned char idle_at_tick;
  520. /* For active balancing */
  521. int post_schedule;
  522. int active_balance;
  523. int push_cpu;
  524. /* cpu of this runqueue: */
  525. int cpu;
  526. int online;
  527. unsigned long avg_load_per_task;
  528. struct task_struct *migration_thread;
  529. struct list_head migration_queue;
  530. #endif
  531. /* calc_load related fields */
  532. unsigned long calc_load_update;
  533. long calc_load_active;
  534. #ifdef CONFIG_SCHED_HRTICK
  535. #ifdef CONFIG_SMP
  536. int hrtick_csd_pending;
  537. struct call_single_data hrtick_csd;
  538. #endif
  539. struct hrtimer hrtick_timer;
  540. #endif
  541. #ifdef CONFIG_SCHEDSTATS
  542. /* latency stats */
  543. struct sched_info rq_sched_info;
  544. unsigned long long rq_cpu_time;
  545. /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
  546. /* sys_sched_yield() stats */
  547. unsigned int yld_count;
  548. /* schedule() stats */
  549. unsigned int sched_switch;
  550. unsigned int sched_count;
  551. unsigned int sched_goidle;
  552. /* try_to_wake_up() stats */
  553. unsigned int ttwu_count;
  554. unsigned int ttwu_local;
  555. /* BKL stats */
  556. unsigned int bkl_count;
  557. #endif
  558. };
  559. static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  560. static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
  561. {
  562. rq->curr->sched_class->check_preempt_curr(rq, p, sync);
  563. }
  564. static inline int cpu_of(struct rq *rq)
  565. {
  566. #ifdef CONFIG_SMP
  567. return rq->cpu;
  568. #else
  569. return 0;
  570. #endif
  571. }
  572. /*
  573. * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  574. * See detach_destroy_domains: synchronize_sched for details.
  575. *
  576. * The domain tree of any CPU may only be accessed from within
  577. * preempt-disabled sections.
  578. */
  579. #define for_each_domain(cpu, __sd) \
  580. for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
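/*
 * Editorial usage sketch (mirrors the loop in sched_balance_self()
 * further below); must run with preemption disabled, per the rule
 * above:
 *
 *	struct sched_domain *tmp, *sd = NULL;
 *
 *	for_each_domain(cpu, tmp)
 *		if (tmp->flags & flag)
 *			sd = tmp;
 */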
  581. #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
  582. #define this_rq() (&__get_cpu_var(runqueues))
  583. #define task_rq(p) cpu_rq(task_cpu(p))
  584. #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
  585. #define raw_rq() (&__raw_get_cpu_var(runqueues))
  586. inline void update_rq_clock(struct rq *rq)
  587. {
  588. rq->clock = sched_clock_cpu(cpu_of(rq));
  589. }
  590. /*
  591. * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  592. */
  593. #ifdef CONFIG_SCHED_DEBUG
  594. # define const_debug __read_mostly
  595. #else
  596. # define const_debug static const
  597. #endif
  598. /**
  599. * runqueue_is_locked
  600. *
  601. * Returns true if the current cpu runqueue is locked.
  602. * This interface allows printk to be called with the runqueue lock
  603. * held and know whether or not it is OK to wake up the klogd.
  604. */
  605. int runqueue_is_locked(void)
  606. {
  607. int cpu = get_cpu();
  608. struct rq *rq = cpu_rq(cpu);
  609. int ret;
  610. ret = spin_is_locked(&rq->lock);
  611. put_cpu();
  612. return ret;
  613. }
  614. /*
  615. * Debugging: various feature bits
  616. */
  617. #define SCHED_FEAT(name, enabled) \
  618. __SCHED_FEAT_##name ,
  619. enum {
  620. #include "sched_features.h"
  621. };
  622. #undef SCHED_FEAT
  623. #define SCHED_FEAT(name, enabled) \
  624. (1UL << __SCHED_FEAT_##name) * enabled |
  625. const_debug unsigned int sysctl_sched_features =
  626. #include "sched_features.h"
  627. 0;
  628. #undef SCHED_FEAT
  629. #ifdef CONFIG_SCHED_DEBUG
  630. #define SCHED_FEAT(name, enabled) \
  631. #name ,
  632. static __read_mostly char *sched_feat_names[] = {
  633. #include "sched_features.h"
  634. NULL
  635. };
  636. #undef SCHED_FEAT
  637. static int sched_feat_show(struct seq_file *m, void *v)
  638. {
  639. int i;
  640. for (i = 0; sched_feat_names[i]; i++) {
  641. if (!(sysctl_sched_features & (1UL << i)))
  642. seq_puts(m, "NO_");
  643. seq_printf(m, "%s ", sched_feat_names[i]);
  644. }
  645. seq_puts(m, "\n");
  646. return 0;
  647. }
  648. static ssize_t
  649. sched_feat_write(struct file *filp, const char __user *ubuf,
  650. size_t cnt, loff_t *ppos)
  651. {
  652. char buf[64];
  653. char *cmp = buf;
  654. int neg = 0;
  655. int i;
  656. if (cnt > 63)
  657. cnt = 63;
  658. if (copy_from_user(&buf, ubuf, cnt))
  659. return -EFAULT;
  660. buf[cnt] = 0;
  661. if (strncmp(buf, "NO_", 3) == 0) {
  662. neg = 1;
  663. cmp += 3;
  664. }
  665. for (i = 0; sched_feat_names[i]; i++) {
  666. int len = strlen(sched_feat_names[i]);
  667. if (strncmp(cmp, sched_feat_names[i], len) == 0) {
  668. if (neg)
  669. sysctl_sched_features &= ~(1UL << i);
  670. else
  671. sysctl_sched_features |= (1UL << i);
  672. break;
  673. }
  674. }
  675. if (!sched_feat_names[i])
  676. return -EINVAL;
  677. filp->f_pos += cnt;
  678. return cnt;
  679. }
  680. static int sched_feat_open(struct inode *inode, struct file *filp)
  681. {
  682. return single_open(filp, sched_feat_show, NULL);
  683. }
  684. static struct file_operations sched_feat_fops = {
  685. .open = sched_feat_open,
  686. .write = sched_feat_write,
  687. .read = seq_read,
  688. .llseek = seq_lseek,
  689. .release = single_release,
  690. };
  691. static __init int sched_init_debug(void)
  692. {
  693. debugfs_create_file("sched_features", 0644, NULL, NULL,
  694. &sched_feat_fops);
  695. return 0;
  696. }
  697. late_initcall(sched_init_debug);
  698. #endif
  699. #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
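/*
 * Editorial note: sched_feat() is used as a boolean predicate; e.g.
 * hrtick_enabled() below does:
 *
 *	if (!sched_feat(HRTICK))
 *		return 0;
 */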
  700. /*
  701. * Number of tasks to iterate in a single balance run.
  702. * Limited because this is done with IRQs disabled.
  703. */
  704. const_debug unsigned int sysctl_sched_nr_migrate = 32;
  705. /*
  706. * ratelimit for updating the group shares.
  707. * default: 0.25ms
  708. */
  709. unsigned int sysctl_sched_shares_ratelimit = 250000;
  710. /*
  711. * Inject some fuzziness into changing the per-cpu group shares;
  712. * this avoids remote rq-locks at the expense of fairness.
  713. * default: 4
  714. */
  715. unsigned int sysctl_sched_shares_thresh = 4;
  716. /*
  717. * period over which we measure -rt task cpu usage in us.
  718. * default: 1s
  719. */
  720. unsigned int sysctl_sched_rt_period = 1000000;
  721. static __read_mostly int scheduler_running;
  722. /*
  723. * part of the period that we allow rt tasks to run in us.
  724. * default: 0.95s
  725. */
  726. int sysctl_sched_rt_runtime = 950000;
  727. static inline u64 global_rt_period(void)
  728. {
  729. return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
  730. }
  731. static inline u64 global_rt_runtime(void)
  732. {
  733. if (sysctl_sched_rt_runtime < 0)
  734. return RUNTIME_INF;
  735. return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
  736. }
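/*
 * Editorial example: with the defaults above, global_rt_period() is
 * 1000000 * NSEC_PER_USEC = 1s and global_rt_runtime() is
 * 950000 * NSEC_PER_USEC = 0.95s, i.e. rt tasks may consume at most
 * 95% of each period. Writing -1 to sysctl_sched_rt_runtime yields
 * RUNTIME_INF (no rt throttling).
 */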
  737. #ifndef prepare_arch_switch
  738. # define prepare_arch_switch(next) do { } while (0)
  739. #endif
  740. #ifndef finish_arch_switch
  741. # define finish_arch_switch(prev) do { } while (0)
  742. #endif
  743. static inline int task_current(struct rq *rq, struct task_struct *p)
  744. {
  745. return rq->curr == p;
  746. }
  747. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  748. static inline int task_running(struct rq *rq, struct task_struct *p)
  749. {
  750. return task_current(rq, p);
  751. }
  752. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  753. {
  754. }
  755. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  756. {
  757. #ifdef CONFIG_DEBUG_SPINLOCK
  758. /* this is a valid case when another task releases the spinlock */
  759. rq->lock.owner = current;
  760. #endif
  761. /*
  762. * If we are tracking spinlock dependencies then we have to
  763. * fix up the runqueue lock - which gets 'carried over' from
  764. * prev into current:
  765. */
  766. spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
  767. spin_unlock_irq(&rq->lock);
  768. }
  769. #else /* __ARCH_WANT_UNLOCKED_CTXSW */
  770. static inline int task_running(struct rq *rq, struct task_struct *p)
  771. {
  772. #ifdef CONFIG_SMP
  773. return p->oncpu;
  774. #else
  775. return task_current(rq, p);
  776. #endif
  777. }
  778. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  779. {
  780. #ifdef CONFIG_SMP
  781. /*
  782. * We can optimise this out completely for !SMP, because the
  783. * SMP rebalancing from interrupt is the only thing that cares
  784. * here.
  785. */
  786. next->oncpu = 1;
  787. #endif
  788. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  789. spin_unlock_irq(&rq->lock);
  790. #else
  791. spin_unlock(&rq->lock);
  792. #endif
  793. }
  794. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  795. {
  796. #ifdef CONFIG_SMP
  797. /*
  798. * After ->oncpu is cleared, the task can be moved to a different CPU.
  799. * We must ensure this doesn't happen until the switch is completely
  800. * finished.
  801. */
  802. smp_wmb();
  803. prev->oncpu = 0;
  804. #endif
  805. #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  806. local_irq_enable();
  807. #endif
  808. }
  809. #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
  810. /*
  811. * __task_rq_lock - lock the runqueue a given task resides on.
  812. * Must be called with interrupts disabled.
  813. */
  814. static inline struct rq *__task_rq_lock(struct task_struct *p)
  815. __acquires(rq->lock)
  816. {
  817. for (;;) {
  818. struct rq *rq = task_rq(p);
  819. spin_lock(&rq->lock);
  820. if (likely(rq == task_rq(p)))
  821. return rq;
  822. spin_unlock(&rq->lock);
  823. }
  824. }
  825. /*
  826. * task_rq_lock - lock the runqueue a given task resides on and disable
  827. * interrupts. Note the ordering: we can safely look up the task_rq without
  828. * explicitly disabling preemption.
  829. */
  830. static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
  831. __acquires(rq->lock)
  832. {
  833. struct rq *rq;
  834. for (;;) {
  835. local_irq_save(*flags);
  836. rq = task_rq(p);
  837. spin_lock(&rq->lock);
  838. if (likely(rq == task_rq(p)))
  839. return rq;
  840. spin_unlock_irqrestore(&rq->lock, *flags);
  841. }
  842. }
  843. void task_rq_unlock_wait(struct task_struct *p)
  844. {
  845. struct rq *rq = task_rq(p);
  846. smp_mb(); /* spin-unlock-wait is not a full memory barrier */
  847. spin_unlock_wait(&rq->lock);
  848. }
  849. static void __task_rq_unlock(struct rq *rq)
  850. __releases(rq->lock)
  851. {
  852. spin_unlock(&rq->lock);
  853. }
  854. static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
  855. __releases(rq->lock)
  856. {
  857. spin_unlock_irqrestore(&rq->lock, *flags);
  858. }
  859. /*
  860. * this_rq_lock - lock this runqueue and disable interrupts.
  861. */
  862. static struct rq *this_rq_lock(void)
  863. __acquires(rq->lock)
  864. {
  865. struct rq *rq;
  866. local_irq_disable();
  867. rq = this_rq();
  868. spin_lock(&rq->lock);
  869. return rq;
  870. }
  871. #ifdef CONFIG_SCHED_HRTICK
  872. /*
  873. * Use HR-timers to deliver accurate preemption points.
  874. *
  875. * It's all a bit involved since we cannot program an hrtimer while holding
  876. * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
  877. * reschedule event.
  878. *
  879. * When we get rescheduled we reprogram the hrtick_timer outside of the
  880. * rq->lock.
  881. */
  882. /*
  883. * Use hrtick when:
  884. * - enabled by features
  885. * - hrtimer is actually high res
  886. */
  887. static inline int hrtick_enabled(struct rq *rq)
  888. {
  889. if (!sched_feat(HRTICK))
  890. return 0;
  891. if (!cpu_active(cpu_of(rq)))
  892. return 0;
  893. return hrtimer_is_hres_active(&rq->hrtick_timer);
  894. }
  895. static void hrtick_clear(struct rq *rq)
  896. {
  897. if (hrtimer_active(&rq->hrtick_timer))
  898. hrtimer_cancel(&rq->hrtick_timer);
  899. }
  900. /*
  901. * High-resolution timer tick.
  902. * Runs from hardirq context with interrupts disabled.
  903. */
  904. static enum hrtimer_restart hrtick(struct hrtimer *timer)
  905. {
  906. struct rq *rq = container_of(timer, struct rq, hrtick_timer);
  907. WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
  908. spin_lock(&rq->lock);
  909. update_rq_clock(rq);
  910. rq->curr->sched_class->task_tick(rq, rq->curr, 1);
  911. spin_unlock(&rq->lock);
  912. return HRTIMER_NORESTART;
  913. }
  914. #ifdef CONFIG_SMP
  915. /*
  916. * called from hardirq (IPI) context
  917. */
  918. static void __hrtick_start(void *arg)
  919. {
  920. struct rq *rq = arg;
  921. spin_lock(&rq->lock);
  922. hrtimer_restart(&rq->hrtick_timer);
  923. rq->hrtick_csd_pending = 0;
  924. spin_unlock(&rq->lock);
  925. }
  926. /*
  927. * Called to set the hrtick timer state.
  928. *
  929. * called with rq->lock held and irqs disabled
  930. */
  931. static void hrtick_start(struct rq *rq, u64 delay)
  932. {
  933. struct hrtimer *timer = &rq->hrtick_timer;
  934. ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
  935. hrtimer_set_expires(timer, time);
  936. if (rq == this_rq()) {
  937. hrtimer_restart(timer);
  938. } else if (!rq->hrtick_csd_pending) {
  939. __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
  940. rq->hrtick_csd_pending = 1;
  941. }
  942. }
  943. static int
  944. hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
  945. {
  946. int cpu = (int)(long)hcpu;
  947. switch (action) {
  948. case CPU_UP_CANCELED:
  949. case CPU_UP_CANCELED_FROZEN:
  950. case CPU_DOWN_PREPARE:
  951. case CPU_DOWN_PREPARE_FROZEN:
  952. case CPU_DEAD:
  953. case CPU_DEAD_FROZEN:
  954. hrtick_clear(cpu_rq(cpu));
  955. return NOTIFY_OK;
  956. }
  957. return NOTIFY_DONE;
  958. }
  959. static __init void init_hrtick(void)
  960. {
  961. hotcpu_notifier(hotplug_hrtick, 0);
  962. }
  963. #else
  964. /*
  965. * Called to set the hrtick timer state.
  966. *
  967. * called with rq->lock held and irqs disabled
  968. */
  969. static void hrtick_start(struct rq *rq, u64 delay)
  970. {
  971. __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
  972. HRTIMER_MODE_REL_PINNED, 0);
  973. }
  974. static inline void init_hrtick(void)
  975. {
  976. }
  977. #endif /* CONFIG_SMP */
  978. static void init_rq_hrtick(struct rq *rq)
  979. {
  980. #ifdef CONFIG_SMP
  981. rq->hrtick_csd_pending = 0;
  982. rq->hrtick_csd.flags = 0;
  983. rq->hrtick_csd.func = __hrtick_start;
  984. rq->hrtick_csd.info = rq;
  985. #endif
  986. hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  987. rq->hrtick_timer.function = hrtick;
  988. }
  989. #else /* CONFIG_SCHED_HRTICK */
  990. static inline void hrtick_clear(struct rq *rq)
  991. {
  992. }
  993. static inline void init_rq_hrtick(struct rq *rq)
  994. {
  995. }
  996. static inline void init_hrtick(void)
  997. {
  998. }
  999. #endif /* CONFIG_SCHED_HRTICK */
  1000. /*
  1001. * resched_task - mark a task 'to be rescheduled now'.
  1002. *
  1003. * On UP this means the setting of the need_resched flag, on SMP it
  1004. * might also involve a cross-CPU call to trigger the scheduler on
  1005. * the target CPU.
  1006. */
  1007. #ifdef CONFIG_SMP
  1008. #ifndef tsk_is_polling
  1009. #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
  1010. #endif
  1011. static void resched_task(struct task_struct *p)
  1012. {
  1013. int cpu;
  1014. assert_spin_locked(&task_rq(p)->lock);
  1015. if (test_tsk_need_resched(p))
  1016. return;
  1017. set_tsk_need_resched(p);
  1018. cpu = task_cpu(p);
  1019. if (cpu == smp_processor_id())
  1020. return;
  1021. /* NEED_RESCHED must be visible before we test polling */
  1022. smp_mb();
  1023. if (!tsk_is_polling(p))
  1024. smp_send_reschedule(cpu);
  1025. }
  1026. static void resched_cpu(int cpu)
  1027. {
  1028. struct rq *rq = cpu_rq(cpu);
  1029. unsigned long flags;
  1030. if (!spin_trylock_irqsave(&rq->lock, flags))
  1031. return;
  1032. resched_task(cpu_curr(cpu));
  1033. spin_unlock_irqrestore(&rq->lock, flags);
  1034. }
  1035. #ifdef CONFIG_NO_HZ
  1036. /*
  1037. * When add_timer_on() enqueues a timer into the timer wheel of an
  1038. * idle CPU then this timer might expire before the next timer event
  1039. * which is scheduled to wake up that CPU. In case of a completely
  1040. * idle system the next event might even be infinite time into the
  1041. * future. wake_up_idle_cpu() ensures that the CPU is woken up and
  1042. * leaves the inner idle loop so the newly added timer is taken into
  1043. * account when the CPU goes back to idle and evaluates the timer
  1044. * wheel for the next timer event.
  1045. */
  1046. void wake_up_idle_cpu(int cpu)
  1047. {
  1048. struct rq *rq = cpu_rq(cpu);
  1049. if (cpu == smp_processor_id())
  1050. return;
  1051. /*
  1052. * This is safe, as this function is called with the timer
  1053. * wheel base lock of (cpu) held. When the CPU is on the way
  1054. * to idle and has not yet set rq->curr to idle then it will
  1055. * be serialized on the timer wheel base lock and take the new
  1056. * timer into account automatically.
  1057. */
  1058. if (rq->curr != rq->idle)
  1059. return;
  1060. /*
  1061. * We can set TIF_RESCHED on the idle task of the other CPU
  1062. * lockless. The worst case is that the other CPU runs the
  1063. * idle task through an additional NOOP schedule()
  1064. */
  1065. set_tsk_need_resched(rq->idle);
  1066. /* NEED_RESCHED must be visible before we test polling */
  1067. smp_mb();
  1068. if (!tsk_is_polling(rq->idle))
  1069. smp_send_reschedule(cpu);
  1070. }
  1071. #endif /* CONFIG_NO_HZ */
  1072. #else /* !CONFIG_SMP */
  1073. static void resched_task(struct task_struct *p)
  1074. {
  1075. assert_spin_locked(&task_rq(p)->lock);
  1076. set_tsk_need_resched(p);
  1077. }
  1078. #endif /* CONFIG_SMP */
  1079. #if BITS_PER_LONG == 32
  1080. # define WMULT_CONST (~0UL)
  1081. #else
  1082. # define WMULT_CONST (1UL << 32)
  1083. #endif
  1084. #define WMULT_SHIFT 32
  1085. /*
  1086. * Shift right and round:
  1087. */
  1088. #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
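/*
 * Editorial example: SRR() rounds to nearest rather than truncating,
 * e.g. SRR(7, 2) == (7 + 2) >> 2 == 2 (7/4 = 1.75, rounded to 2),
 * whereas a plain shift would give 1.
 */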
  1089. /*
  1090. * delta *= weight / lw
  1091. */
  1092. static unsigned long
  1093. calc_delta_mine(unsigned long delta_exec, unsigned long weight,
  1094. struct load_weight *lw)
  1095. {
  1096. u64 tmp;
  1097. if (!lw->inv_weight) {
  1098. if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
  1099. lw->inv_weight = 1;
  1100. else
  1101. lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
  1102. / (lw->weight+1);
  1103. }
  1104. tmp = (u64)delta_exec * weight;
  1105. /*
  1106. * Check whether we'd overflow the 64-bit multiplication:
  1107. */
  1108. if (unlikely(tmp > WMULT_CONST))
  1109. tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
  1110. WMULT_SHIFT/2);
  1111. else
  1112. tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
  1113. return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
  1114. }
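/*
 * Editorial example (approximate): for delta_exec = 1000000ns,
 * weight = 1024 (the nice-0 weight) and lw->weight = 2048, inv_weight
 * is close to 2^32/2048 = 2097152, so the result is roughly
 * 1000000 * 1024 / 2048 ~ 500000ns - i.e. "delta *= weight / lw",
 * exactly as the comment above states.
 */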
  1115. static inline void update_load_add(struct load_weight *lw, unsigned long inc)
  1116. {
  1117. lw->weight += inc;
  1118. lw->inv_weight = 0;
  1119. }
  1120. static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  1121. {
  1122. lw->weight -= dec;
  1123. lw->inv_weight = 0;
  1124. }
  1125. /*
  1126. * To aid in avoiding the subversion of "niceness" due to uneven distribution
  1127. * of tasks with abnormal "nice" values across CPUs, the contribution that
  1128. * each task makes to its run queue's load is weighted according to its
  1129. * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
  1130. * scaled version of the new time slice allocation that they receive on time
  1131. * slice expiry etc.
  1132. */
  1133. #define WEIGHT_IDLEPRIO 3
  1134. #define WMULT_IDLEPRIO 1431655765
  1135. /*
  1136. * Nice levels are multiplicative, with a gentle 10% change for every
  1137. * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
  1138. * nice 1, it will get ~10% less CPU time than another CPU-bound task
  1139. * that remained on nice 0.
  1140. *
  1141. * The "10% effect" is relative and cumulative: from _any_ nice level,
  1142. * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
  1143. * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
  1144. * If a task goes up by ~10% and another task goes down by ~10% then
  1145. * the relative distance between them is ~25%.)
  1146. */
  1147. static const int prio_to_weight[40] = {
  1148. /* -20 */ 88761, 71755, 56483, 46273, 36291,
  1149. /* -15 */ 29154, 23254, 18705, 14949, 11916,
  1150. /* -10 */ 9548, 7620, 6100, 4904, 3906,
  1151. /* -5 */ 3121, 2501, 1991, 1586, 1277,
  1152. /* 0 */ 1024, 820, 655, 526, 423,
  1153. /* 5 */ 335, 272, 215, 172, 137,
  1154. /* 10 */ 110, 87, 70, 56, 45,
  1155. /* 15 */ 36, 29, 23, 18, 15,
  1156. };
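/*
 * Editorial example: two CPU-bound tasks at nice 0 and nice 1 have
 * weights 1024 and 820, so they receive about 1024/1844 ~ 55% and
 * 820/1844 ~ 45% of the CPU - the nice 1 task gets ~10% less, matching
 * the rule described above. Each nice step multiplies or divides the
 * weight by ~1.25 (e.g. 1024/820 ~ 1.25).
 */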
  1157. /*
  1158. * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
  1159. *
  1160. * In cases where the weight does not change often, we can use the
  1161. * precalculated inverse to speed up arithmetic by turning divisions
  1162. * into multiplications:
  1163. */
  1164. static const u32 prio_to_wmult[40] = {
  1165. /* -20 */ 48388, 59856, 76040, 92818, 118348,
  1166. /* -15 */ 147320, 184698, 229616, 287308, 360437,
  1167. /* -10 */ 449829, 563644, 704093, 875809, 1099582,
  1168. /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
  1169. /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
  1170. /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
  1171. /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
  1172. /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
  1173. };
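/*
 * Editorial note: e.g. the nice-0 entry is 4194304 == 2^32/1024, so
 * "x / 1024" can be computed as "(x * 4194304) >> 32" - exactly the
 * inv_weight trick used by calc_delta_mine() above.
 */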
  1174. static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
  1175. /*
  1176. * runqueue iterator, to support SMP load-balancing between different
  1177. * scheduling classes, without having to expose their internal data
  1178. * structures to the load-balancing proper:
  1179. */
  1180. struct rq_iterator {
  1181. void *arg;
  1182. struct task_struct *(*start)(void *);
  1183. struct task_struct *(*next)(void *);
  1184. };
  1185. #ifdef CONFIG_SMP
  1186. static unsigned long
  1187. balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1188. unsigned long max_load_move, struct sched_domain *sd,
  1189. enum cpu_idle_type idle, int *all_pinned,
  1190. int *this_best_prio, struct rq_iterator *iterator);
  1191. static int
  1192. iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1193. struct sched_domain *sd, enum cpu_idle_type idle,
  1194. struct rq_iterator *iterator);
  1195. #endif
  1196. /* Time spent by the tasks of the cpu accounting group executing in ... */
  1197. enum cpuacct_stat_index {
  1198. CPUACCT_STAT_USER, /* ... user mode */
  1199. CPUACCT_STAT_SYSTEM, /* ... kernel mode */
  1200. CPUACCT_STAT_NSTATS,
  1201. };
  1202. #ifdef CONFIG_CGROUP_CPUACCT
  1203. static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
  1204. static void cpuacct_update_stats(struct task_struct *tsk,
  1205. enum cpuacct_stat_index idx, cputime_t val);
  1206. #else
  1207. static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
  1208. static inline void cpuacct_update_stats(struct task_struct *tsk,
  1209. enum cpuacct_stat_index idx, cputime_t val) {}
  1210. #endif
  1211. static inline void inc_cpu_load(struct rq *rq, unsigned long load)
  1212. {
  1213. update_load_add(&rq->load, load);
  1214. }
  1215. static inline void dec_cpu_load(struct rq *rq, unsigned long load)
  1216. {
  1217. update_load_sub(&rq->load, load);
  1218. }
  1219. #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
  1220. typedef int (*tg_visitor)(struct task_group *, void *);
  1221. /*
  1222. * Iterate the full tree, calling @down when first entering a node and @up when
  1223. * leaving it for the final time.
  1224. */
  1225. static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
  1226. {
  1227. struct task_group *parent, *child;
  1228. int ret;
  1229. rcu_read_lock();
  1230. parent = &root_task_group;
  1231. down:
  1232. ret = (*down)(parent, data);
  1233. if (ret)
  1234. goto out_unlock;
  1235. list_for_each_entry_rcu(child, &parent->children, siblings) {
  1236. parent = child;
  1237. goto down;
  1238. up:
  1239. continue;
  1240. }
  1241. ret = (*up)(parent, data);
  1242. if (ret)
  1243. goto out_unlock;
  1244. child = parent;
  1245. parent = parent->parent;
  1246. if (parent)
  1247. goto up;
  1248. out_unlock:
  1249. rcu_read_unlock();
  1250. return ret;
  1251. }
  1252. static int tg_nop(struct task_group *tg, void *data)
  1253. {
  1254. return 0;
  1255. }
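/*
 * Editorial note: callers pass tg_nop for the direction they do not
 * care about; e.g. update_shares() below walks bottom-up with
 * walk_tg_tree(tg_nop, tg_shares_up, sd), while update_h_load() walks
 * top-down with walk_tg_tree(tg_load_down, tg_nop, (void *)cpu).
 */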
  1256. #endif
  1257. #ifdef CONFIG_SMP
  1258. static unsigned long source_load(int cpu, int type);
  1259. static unsigned long target_load(int cpu, int type);
  1260. static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
  1261. static unsigned long cpu_avg_load_per_task(int cpu)
  1262. {
  1263. struct rq *rq = cpu_rq(cpu);
  1264. unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
  1265. if (nr_running)
  1266. rq->avg_load_per_task = rq->load.weight / nr_running;
  1267. else
  1268. rq->avg_load_per_task = 0;
  1269. return rq->avg_load_per_task;
  1270. }
  1271. #ifdef CONFIG_FAIR_GROUP_SCHED
  1272. struct update_shares_data {
  1273. unsigned long rq_weight[NR_CPUS];
  1274. };
  1275. static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
  1276. static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  1277. /*
  1278. * Calculate and set the cpu's group shares.
  1279. */
  1280. static void update_group_shares_cpu(struct task_group *tg, int cpu,
  1281. unsigned long sd_shares,
  1282. unsigned long sd_rq_weight,
  1283. struct update_shares_data *usd)
  1284. {
  1285. unsigned long shares, rq_weight;
  1286. int boost = 0;
  1287. rq_weight = usd->rq_weight[cpu];
  1288. if (!rq_weight) {
  1289. boost = 1;
  1290. rq_weight = NICE_0_LOAD;
  1291. }
  1292. /*
  1293. *              \Sum_j shares_j * rq_weight_i
  1294. * shares_i = -------------------------------
  1295. *                   \Sum_j rq_weight_j
  1296. */
  1297. shares = (sd_shares * rq_weight) / sd_rq_weight;
  1298. shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
  1299. if (abs(shares - tg->se[cpu]->load.weight) >
  1300. sysctl_sched_shares_thresh) {
  1301. struct rq *rq = cpu_rq(cpu);
  1302. unsigned long flags;
  1303. spin_lock_irqsave(&rq->lock, flags);
  1304. tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
  1305. tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
  1306. __set_se_shares(tg->se[cpu], shares);
  1307. spin_unlock_irqrestore(&rq->lock, flags);
  1308. }
  1309. }
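/*
 * Editorial example of the shares_i formula above: with sd_shares =
 * 1024 and two cpus whose rq_weights are 3072 and 1024 (so
 * sd_rq_weight = 4096), the group's shares are split into
 * 1024*3072/4096 = 768 and 1024*1024/4096 = 256 - proportionally to
 * each cpu's queue weight.
 */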
  1310. /*
  1311. * Re-compute a task group's per-cpu shares over the given domain.
  1312. * This needs to be done in a bottom-up fashion because the rq weight of a
  1313. * parent group depends on the shares of its child groups.
  1314. */
  1315. static int tg_shares_up(struct task_group *tg, void *data)
  1316. {
  1317. unsigned long weight, rq_weight = 0, shares = 0;
  1318. struct update_shares_data *usd;
  1319. struct sched_domain *sd = data;
  1320. unsigned long flags;
  1321. int i;
  1322. if (!tg->se[0])
  1323. return 0;
  1324. local_irq_save(flags);
  1325. usd = &__get_cpu_var(update_shares_data);
  1326. for_each_cpu(i, sched_domain_span(sd)) {
  1327. weight = tg->cfs_rq[i]->load.weight;
  1328. usd->rq_weight[i] = weight;
  1329. /*
  1330. * If there are currently no tasks on the cpu pretend there
  1331. * is one of average load so that when a new task gets to
  1332. * run here it will not get delayed by group starvation.
  1333. */
  1334. if (!weight)
  1335. weight = NICE_0_LOAD;
  1336. rq_weight += weight;
  1337. shares += tg->cfs_rq[i]->shares;
  1338. }
  1339. if ((!shares && rq_weight) || shares > tg->shares)
  1340. shares = tg->shares;
  1341. if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
  1342. shares = tg->shares;
  1343. for_each_cpu(i, sched_domain_span(sd))
  1344. update_group_shares_cpu(tg, i, shares, rq_weight, usd);
  1345. local_irq_restore(flags);
  1346. return 0;
  1347. }
  1348. /*
  1349. * Compute the cpu's hierarchical load factor for each task group.
  1350. * This needs to be done in a top-down fashion because the load of a child
  1351. * group is a fraction of its parent's load.
  1352. */
  1353. static int tg_load_down(struct task_group *tg, void *data)
  1354. {
  1355. unsigned long load;
  1356. long cpu = (long)data;
  1357. if (!tg->parent) {
  1358. load = cpu_rq(cpu)->load.weight;
  1359. } else {
  1360. load = tg->parent->cfs_rq[cpu]->h_load;
  1361. load *= tg->cfs_rq[cpu]->shares;
  1362. load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
  1363. }
  1364. tg->cfs_rq[cpu]->h_load = load;
  1365. return 0;
  1366. }
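/*
 * Editorial example: if the parent's h_load on this cpu is 2048, this
 * group holds shares = 512 of a parent cfs_rq whose load.weight is
 * 1023, then h_load = 2048 * 512 / (1023 + 1) = 1024 - the child
 * carries half of the parent's hierarchical load.
 */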
  1367. static void update_shares(struct sched_domain *sd)
  1368. {
  1369. s64 elapsed;
  1370. u64 now;
  1371. if (root_task_group_empty())
  1372. return;
  1373. now = cpu_clock(raw_smp_processor_id());
  1374. elapsed = now - sd->last_update;
  1375. if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
  1376. sd->last_update = now;
  1377. walk_tg_tree(tg_nop, tg_shares_up, sd);
  1378. }
  1379. }
  1380. static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
  1381. {
  1382. if (root_task_group_empty())
  1383. return;
  1384. spin_unlock(&rq->lock);
  1385. update_shares(sd);
  1386. spin_lock(&rq->lock);
  1387. }
  1388. static void update_h_load(long cpu)
  1389. {
  1390. if (root_task_group_empty())
  1391. return;
  1392. walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
  1393. }
  1394. #else
  1395. static inline void update_shares(struct sched_domain *sd)
  1396. {
  1397. }
  1398. static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
  1399. {
  1400. }
  1401. #endif
  1402. #ifdef CONFIG_PREEMPT
  1403. /*
  1404. * fair double_lock_balance: Safely acquires both rq->locks in a fair
  1405. * way at the expense of forcing extra atomic operations in all
  1406. * invocations. This assures that the double_lock is acquired using the
  1407. * same underlying policy as the spinlock_t on this architecture, which
  1408. * reduces latency compared to the unfair variant below. However, it
  1409. * also adds more overhead and therefore may reduce throughput.
  1410. */
  1411. static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1412. __releases(this_rq->lock)
  1413. __acquires(busiest->lock)
  1414. __acquires(this_rq->lock)
  1415. {
  1416. spin_unlock(&this_rq->lock);
  1417. double_rq_lock(this_rq, busiest);
  1418. return 1;
  1419. }
  1420. #else
  1421. /*
  1422. * Unfair double_lock_balance: Optimizes throughput at the expense of
  1423. * latency by eliminating extra atomic operations when the locks are
  1424. * already in proper order on entry. This favors lower cpu-ids and will
  1425. * grant the double lock to lower cpus over higher ids under contention,
  1426. * regardless of entry order into the function.
  1427. */
  1428. static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1429. __releases(this_rq->lock)
  1430. __acquires(busiest->lock)
  1431. __acquires(this_rq->lock)
  1432. {
  1433. int ret = 0;
  1434. if (unlikely(!spin_trylock(&busiest->lock))) {
  1435. if (busiest < this_rq) {
  1436. spin_unlock(&this_rq->lock);
  1437. spin_lock(&busiest->lock);
  1438. spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
  1439. ret = 1;
  1440. } else
  1441. spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
  1442. }
  1443. return ret;
  1444. }
  1445. #endif /* CONFIG_PREEMPT */
  1446. /*
  1447. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  1448. */
  1449. static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1450. {
  1451. if (unlikely(!irqs_disabled())) {
  1452. /* printk() doesn't work well under rq->lock */
  1453. spin_unlock(&this_rq->lock);
  1454. BUG_ON(1);
  1455. }
  1456. return _double_lock_balance(this_rq, busiest);
  1457. }
  1458. static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
  1459. __releases(busiest->lock)
  1460. {
  1461. spin_unlock(&busiest->lock);
  1462. lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
  1463. }
  1464. #endif
  1465. #ifdef CONFIG_FAIR_GROUP_SCHED
  1466. static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
  1467. {
  1468. #ifdef CONFIG_SMP
  1469. cfs_rq->shares = shares;
  1470. #endif
  1471. }
  1472. #endif
  1473. static void calc_load_account_active(struct rq *this_rq);
  1474. #include "sched_stats.h"
  1475. #include "sched_idletask.c"
  1476. #include "sched_fair.c"
  1477. #include "sched_rt.c"
  1478. #ifdef CONFIG_SCHED_DEBUG
  1479. # include "sched_debug.c"
  1480. #endif
  1481. #define sched_class_highest (&rt_sched_class)
  1482. #define for_each_class(class) \
  1483. for (class = sched_class_highest; class; class = class->next)
  1484. static void inc_nr_running(struct rq *rq)
  1485. {
  1486. rq->nr_running++;
  1487. }
  1488. static void dec_nr_running(struct rq *rq)
  1489. {
  1490. rq->nr_running--;
  1491. }
  1492. static void set_load_weight(struct task_struct *p)
  1493. {
  1494. if (task_has_rt_policy(p)) {
  1495. p->se.load.weight = prio_to_weight[0] * 2;
  1496. p->se.load.inv_weight = prio_to_wmult[0] >> 1;
  1497. return;
  1498. }
  1499. /*
  1500. * SCHED_IDLE tasks get minimal weight:
  1501. */
  1502. if (p->policy == SCHED_IDLE) {
  1503. p->se.load.weight = WEIGHT_IDLEPRIO;
  1504. p->se.load.inv_weight = WMULT_IDLEPRIO;
  1505. return;
  1506. }
  1507. p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
  1508. p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
  1509. }
  1510. static void update_avg(u64 *avg, u64 sample)
  1511. {
  1512. s64 diff = sample - *avg;
  1513. *avg += diff >> 3;
  1514. }
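/*
 * Editorial note: update_avg() is an exponentially weighted moving
 * average with a 1/8 gain: avg += (sample - avg) / 8. E.g. avg = 0 and
 * sample = 800 moves avg to 100; a second identical sample adds
 * 700 >> 3 = 87, and repeated samples converge towards the sample
 * value.
 */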
  1515. static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
  1516. {
  1517. if (wakeup)
  1518. p->se.start_runtime = p->se.sum_exec_runtime;
  1519. sched_info_queued(p);
  1520. p->sched_class->enqueue_task(rq, p, wakeup);
  1521. p->se.on_rq = 1;
  1522. }
  1523. static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
  1524. {
  1525. if (sleep) {
  1526. if (p->se.last_wakeup) {
  1527. update_avg(&p->se.avg_overlap,
  1528. p->se.sum_exec_runtime - p->se.last_wakeup);
  1529. p->se.last_wakeup = 0;
  1530. } else {
  1531. update_avg(&p->se.avg_wakeup,
  1532. sysctl_sched_wakeup_granularity);
  1533. }
  1534. }
  1535. sched_info_dequeued(p);
  1536. p->sched_class->dequeue_task(rq, p, sleep);
  1537. p->se.on_rq = 0;
  1538. }
  1539. /*
  1540. * __normal_prio - return the priority that is based on the static prio
  1541. */
  1542. static inline int __normal_prio(struct task_struct *p)
  1543. {
  1544. return p->static_prio;
  1545. }
  1546. /*
  1547. * Calculate the expected normal priority: i.e. priority
  1548. * without taking RT-inheritance into account. Might be
  1549. * boosted by interactivity modifiers. Changes upon fork,
  1550. * setprio syscalls, and whenever the interactivity
  1551. * estimator recalculates.
  1552. */
  1553. static inline int normal_prio(struct task_struct *p)
  1554. {
  1555. int prio;
  1556. if (task_has_rt_policy(p))
  1557. prio = MAX_RT_PRIO-1 - p->rt_priority;
  1558. else
  1559. prio = __normal_prio(p);
  1560. return prio;
  1561. }
  1562. /*
  1563. * Calculate the current priority, i.e. the priority
  1564. * taken into account by the scheduler. This value might
  1565. * be boosted by RT tasks, or might be boosted by
  1566. * interactivity modifiers. Will be RT if the task got
  1567. * RT-boosted. If not then it returns p->normal_prio.
  1568. */
  1569. static int effective_prio(struct task_struct *p)
  1570. {
  1571. p->normal_prio = normal_prio(p);
  1572. /*
  1573. * If we are RT tasks or we were boosted to RT priority,
  1574. * keep the priority unchanged. Otherwise, update priority
  1575. * to the normal priority:
  1576. */
  1577. if (!rt_prio(p->prio))
  1578. return p->normal_prio;
  1579. return p->prio;
  1580. }
  1581. /*
  1582. * activate_task - move a task to the runqueue.
  1583. */
  1584. static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  1585. {
  1586. if (task_contributes_to_load(p))
  1587. rq->nr_uninterruptible--;
  1588. enqueue_task(rq, p, wakeup);
  1589. inc_nr_running(rq);
  1590. }
  1591. /*
  1592. * deactivate_task - remove a task from the runqueue.
  1593. */
  1594. static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
  1595. {
  1596. if (task_contributes_to_load(p))
  1597. rq->nr_uninterruptible++;
  1598. dequeue_task(rq, p, sleep);
  1599. dec_nr_running(rq);
  1600. }
  1601. /**
  1602. * task_curr - is this task currently executing on a CPU?
  1603. * @p: the task in question.
  1604. */
  1605. inline int task_curr(const struct task_struct *p)
  1606. {
  1607. return cpu_curr(task_cpu(p)) == p;
  1608. }
  1609. static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  1610. {
  1611. set_task_rq(p, cpu);
  1612. #ifdef CONFIG_SMP
  1613. /*
  1614. * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
  1615. * successfully executed on another CPU. We must ensure that updates of
  1616. * per-task data have been completed by this moment.
  1617. */
  1618. smp_wmb();
  1619. task_thread_info(p)->cpu = cpu;
  1620. #endif
  1621. }
  1622. static inline void check_class_changed(struct rq *rq, struct task_struct *p,
  1623. const struct sched_class *prev_class,
  1624. int oldprio, int running)
  1625. {
  1626. if (prev_class != p->sched_class) {
  1627. if (prev_class->switched_from)
  1628. prev_class->switched_from(rq, p, running);
  1629. p->sched_class->switched_to(rq, p, running);
  1630. } else
  1631. p->sched_class->prio_changed(rq, p, oldprio, running);
  1632. }
  1633. #ifdef CONFIG_SMP
  1634. /* Used instead of source_load when we know the type == 0 */
  1635. static unsigned long weighted_cpuload(const int cpu)
  1636. {
  1637. return cpu_rq(cpu)->load.weight;
  1638. }
  1639. /*
  1640. * Is this task likely cache-hot:
  1641. */
  1642. static int
  1643. task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
  1644. {
  1645. s64 delta;
  1646. /*
  1647. * Buddy candidates are cache hot:
  1648. */
  1649. if (sched_feat(CACHE_HOT_BUDDY) &&
  1650. (&p->se == cfs_rq_of(&p->se)->next ||
  1651. &p->se == cfs_rq_of(&p->se)->last))
  1652. return 1;
  1653. if (p->sched_class != &fair_sched_class)
  1654. return 0;
  1655. if (sysctl_sched_migration_cost == -1)
  1656. return 1;
  1657. if (sysctl_sched_migration_cost == 0)
  1658. return 0;
  1659. delta = now - p->se.exec_start;
  1660. return delta < (s64)sysctl_sched_migration_cost;
  1661. }
  1662. void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  1663. {
  1664. int old_cpu = task_cpu(p);
  1665. struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
  1666. struct cfs_rq *old_cfsrq = task_cfs_rq(p),
  1667. *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
  1668. u64 clock_offset;
  1669. clock_offset = old_rq->clock - new_rq->clock;
  1670. trace_sched_migrate_task(p, new_cpu);
  1671. #ifdef CONFIG_SCHEDSTATS
  1672. if (p->se.wait_start)
  1673. p->se.wait_start -= clock_offset;
  1674. if (p->se.sleep_start)
  1675. p->se.sleep_start -= clock_offset;
  1676. if (p->se.block_start)
  1677. p->se.block_start -= clock_offset;
  1678. #endif
  1679. if (old_cpu != new_cpu) {
  1680. p->se.nr_migrations++;
  1681. new_rq->nr_migrations_in++;
  1682. #ifdef CONFIG_SCHEDSTATS
  1683. if (task_hot(p, old_rq->clock, NULL))
  1684. schedstat_inc(p, se.nr_forced2_migrations);
  1685. #endif
  1686. perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
  1687. 1, 1, NULL, 0);
  1688. }
  1689. p->se.vruntime -= old_cfsrq->min_vruntime -
  1690. new_cfsrq->min_vruntime;
  1691. __set_task_cpu(p, new_cpu);
  1692. }
  1693. struct migration_req {
  1694. struct list_head list;
  1695. struct task_struct *task;
  1696. int dest_cpu;
  1697. struct completion done;
  1698. };
  1699. /*
  1700. * The task's runqueue lock must be held.
  1701. * Returns true if you have to wait for the migration thread.
  1702. */
  1703. static int
  1704. migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
  1705. {
  1706. struct rq *rq = task_rq(p);
  1707. /*
  1708. * If the task is not on a runqueue (and not running), then
  1709. * it is sufficient to simply update the task's cpu field.
  1710. */
  1711. if (!p->se.on_rq && !task_running(rq, p)) {
  1712. set_task_cpu(p, dest_cpu);
  1713. return 0;
  1714. }
  1715. init_completion(&req->done);
  1716. req->task = p;
  1717. req->dest_cpu = dest_cpu;
  1718. list_add(&req->list, &rq->migration_queue);
  1719. return 1;
  1720. }
  1721. /*
  1722. * wait_task_context_switch - wait for a thread to complete at least one
  1723. * context switch.
  1724. *
  1725. * @p must not be current.
  1726. */
  1727. void wait_task_context_switch(struct task_struct *p)
  1728. {
  1729. unsigned long nvcsw, nivcsw, flags;
  1730. int running;
  1731. struct rq *rq;
  1732. nvcsw = p->nvcsw;
  1733. nivcsw = p->nivcsw;
  1734. for (;;) {
  1735. /*
  1736. * The runqueue is assigned before the actual context
  1737. * switch. We need to take the runqueue lock.
  1738. *
  1739. * We could check initially without the lock but it is
  1740. * very likely that we need to take the lock in every
  1741. * iteration.
  1742. */
  1743. rq = task_rq_lock(p, &flags);
  1744. running = task_running(rq, p);
  1745. task_rq_unlock(rq, &flags);
  1746. if (likely(!running))
  1747. break;
  1748. /*
  1749. * The switch count is incremented before the actual
  1750. * context switch. We thus wait for two switches to be
  1751. * sure at least one completed.
  1752. */
  1753. if ((p->nvcsw - nvcsw) > 1)
  1754. break;
  1755. if ((p->nivcsw - nivcsw) > 1)
  1756. break;
  1757. cpu_relax();
  1758. }
  1759. }
  1760. /*
  1761. * wait_task_inactive - wait for a thread to unschedule.
  1762. *
  1763. * If @match_state is nonzero, it's the @p->state value just checked and
  1764. * not expected to change. If it changes, i.e. @p might have woken up,
  1765. * then return zero. When we succeed in waiting for @p to be off its CPU,
  1766. * we return a positive number (its total switch count). If a second call
  1767. * a short while later returns the same number, the caller can be sure that
  1768. * @p has remained unscheduled the whole time.
  1769. *
  1770. * The caller must ensure that the task *will* unschedule sometime soon,
  1771. * else this function might spin for a *long* time. This function can't
  1772. * be called with interrupts off, or it may introduce deadlock with
  1773. * smp_call_function() if an IPI is sent by the same process we are
  1774. * waiting to become inactive.
  1775. */
  1776. unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  1777. {
  1778. unsigned long flags;
  1779. int running, on_rq;
  1780. unsigned long ncsw;
  1781. struct rq *rq;
  1782. for (;;) {
  1783. /*
  1784. * We do the initial early heuristics without holding
  1785. * any task-queue locks at all. We'll only try to get
  1786. * the runqueue lock when things look like they will
  1787. * work out!
  1788. */
  1789. rq = task_rq(p);
  1790. /*
  1791. * If the task is actively running on another CPU
  1792. * still, just relax and busy-wait without holding
  1793. * any locks.
  1794. *
  1795. * NOTE! Since we don't hold any locks, it's not
  1796. * even sure that "rq" stays as the right runqueue!
  1797. * But we don't care, since "task_running()" will
  1798. * return false if the runqueue has changed and p
  1799. * is actually now running somewhere else!
  1800. */
  1801. while (task_running(rq, p)) {
  1802. if (match_state && unlikely(p->state != match_state))
  1803. return 0;
  1804. cpu_relax();
  1805. }
  1806. /*
  1807. * Ok, time to look more closely! We need the rq
  1808. * lock now, to be *sure*. If we're wrong, we'll
  1809. * just go back and repeat.
  1810. */
  1811. rq = task_rq_lock(p, &flags);
  1812. trace_sched_wait_task(rq, p);
  1813. running = task_running(rq, p);
  1814. on_rq = p->se.on_rq;
  1815. ncsw = 0;
  1816. if (!match_state || p->state == match_state)
  1817. ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  1818. task_rq_unlock(rq, &flags);
  1819. /*
  1820. * If it changed from the expected state, bail out now.
  1821. */
  1822. if (unlikely(!ncsw))
  1823. break;
  1824. /*
  1825. * Was it really running after all now that we
  1826. * checked with the proper locks actually held?
  1827. *
  1828. * Oops. Go back and try again..
  1829. */
  1830. if (unlikely(running)) {
  1831. cpu_relax();
  1832. continue;
  1833. }
  1834. /*
  1835. * It's not enough that it's not actively running,
  1836. * it must be off the runqueue _entirely_, and not
  1837. * preempted!
  1838. *
  1839. * So if it was still runnable (but just not actively
  1840. * running right now), it's preempted, and we should
  1841. * yield - it could be a while.
  1842. */
  1843. if (unlikely(on_rq)) {
  1844. schedule_timeout_uninterruptible(1);
  1845. continue;
  1846. }
  1847. /*
  1848. * Ahh, all good. It wasn't running, and it wasn't
  1849. * runnable, which means that it will never become
  1850. * running in the future either. We're all done!
  1851. */
  1852. break;
  1853. }
  1854. return ncsw;
  1855. }
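/*
 * Editorial usage sketch of the double-call pattern described above
 * (hypothetical caller): a first call, ncsw =
 * wait_task_inactive(p, TASK_TRACED), returns the switch count; a
 * later call that returns the same nonzero ncsw proves @p stayed
 * unscheduled in between, while a return of 0 means the state changed.
 */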
  1856. /***
  1857. * kick_process - kick a running thread to enter/exit the kernel
  1858. * @p: the to-be-kicked thread
  1859. *
  1860. * Cause a process which is running on another CPU to enter
  1861. * kernel-mode, without any delay. (to get signals handled.)
  1862. *
  1863. * NOTE: this function doesn't have to take the runqueue lock,
  1864. * because all it wants to ensure is that the remote task enters
  1865. * the kernel. If the IPI races and the task has been migrated
  1866. * to another CPU then no harm is done and the purpose has been
  1867. * achieved as well.
  1868. */
  1869. void kick_process(struct task_struct *p)
  1870. {
  1871. int cpu;
  1872. preempt_disable();
  1873. cpu = task_cpu(p);
  1874. if ((cpu != smp_processor_id()) && task_curr(p))
  1875. smp_send_reschedule(cpu);
  1876. preempt_enable();
  1877. }
  1878. EXPORT_SYMBOL_GPL(kick_process);
  1879. /*
  1880. * Return a low guess at the load of a migration-source cpu weighted
  1881. * according to the scheduling class and "nice" value.
  1882. *
  1883. * We want to under-estimate the load of migration sources, to
  1884. * balance conservatively.
  1885. */
  1886. static unsigned long source_load(int cpu, int type)
  1887. {
  1888. struct rq *rq = cpu_rq(cpu);
  1889. unsigned long total = weighted_cpuload(cpu);
  1890. if (type == 0 || !sched_feat(LB_BIAS))
  1891. return total;
  1892. return min(rq->cpu_load[type-1], total);
  1893. }
  1894. /*
  1895. * Return a high guess at the load of a migration-target cpu weighted
  1896. * according to the scheduling class and "nice" value.
  1897. */
  1898. static unsigned long target_load(int cpu, int type)
  1899. {
  1900. struct rq *rq = cpu_rq(cpu);
  1901. unsigned long total = weighted_cpuload(cpu);
  1902. if (type == 0 || !sched_feat(LB_BIAS))
  1903. return total;
  1904. return max(rq->cpu_load[type-1], total);
  1905. }
  1906. /*
  1907. * find_idlest_group finds and returns the least busy CPU group within the
  1908. * domain.
  1909. */
  1910. static struct sched_group *
  1911. find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  1912. {
  1913. struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
  1914. unsigned long min_load = ULONG_MAX, this_load = 0;
  1915. int load_idx = sd->forkexec_idx;
  1916. int imbalance = 100 + (sd->imbalance_pct-100)/2;
  1917. do {
  1918. unsigned long load, avg_load;
  1919. int local_group;
  1920. int i;
  1921. /* Skip over this group if it has no CPUs allowed */
  1922. if (!cpumask_intersects(sched_group_cpus(group),
  1923. &p->cpus_allowed))
  1924. continue;
  1925. local_group = cpumask_test_cpu(this_cpu,
  1926. sched_group_cpus(group));
  1927. /* Tally up the load of all CPUs in the group */
  1928. avg_load = 0;
  1929. for_each_cpu(i, sched_group_cpus(group)) {
  1930. /* Bias balancing toward cpus of our domain */
  1931. if (local_group)
  1932. load = source_load(i, load_idx);
  1933. else
  1934. load = target_load(i, load_idx);
  1935. avg_load += load;
  1936. }
  1937. /* Adjust by relative CPU power of the group */
  1938. avg_load = sg_div_cpu_power(group,
  1939. avg_load * SCHED_LOAD_SCALE);
  1940. if (local_group) {
  1941. this_load = avg_load;
  1942. this = group;
  1943. } else if (avg_load < min_load) {
  1944. min_load = avg_load;
  1945. idlest = group;
  1946. }
  1947. } while (group = group->next, group != sd->groups);
  1948. if (!idlest || 100*this_load < imbalance*min_load)
  1949. return NULL;
  1950. return idlest;
  1951. }
  1952. /*
  1953. * find_idlest_cpu - find the idlest cpu among the cpus in group.
  1954. */
  1955. static int
  1956. find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  1957. {
  1958. unsigned long load, min_load = ULONG_MAX;
  1959. int idlest = -1;
  1960. int i;
  1961. /* Traverse only the allowed CPUs */
  1962. for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
  1963. load = weighted_cpuload(i);
  1964. if (load < min_load || (load == min_load && i == this_cpu)) {
  1965. min_load = load;
  1966. idlest = i;
  1967. }
  1968. }
  1969. return idlest;
  1970. }
  1971. /*
  1972. * sched_balance_self: balance the current task (running on cpu) in domains
  1973. * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
  1974. * SD_BALANCE_EXEC.
  1975. *
  1976. * Balance, ie. select the least loaded group.
  1977. *
  1978. * Returns the target CPU number, or the same CPU if no balancing is needed.
  1979. *
  1980. * preempt must be disabled.
  1981. */
  1982. static int sched_balance_self(int cpu, int flag)
  1983. {
  1984. struct task_struct *t = current;
  1985. struct sched_domain *tmp, *sd = NULL;
  1986. for_each_domain(cpu, tmp) {
  1987. /*
  1988. * If power savings logic is enabled for a domain, stop there.
  1989. */
  1990. if (tmp->flags & SD_POWERSAVINGS_BALANCE)
  1991. break;
  1992. if (tmp->flags & flag)
  1993. sd = tmp;
  1994. }
  1995. if (sd)
  1996. update_shares(sd);
  1997. while (sd) {
  1998. struct sched_group *group;
  1999. int new_cpu, weight;
  2000. if (!(sd->flags & flag)) {
  2001. sd = sd->child;
  2002. continue;
  2003. }
  2004. group = find_idlest_group(sd, t, cpu);
  2005. if (!group) {
  2006. sd = sd->child;
  2007. continue;
  2008. }
  2009. new_cpu = find_idlest_cpu(group, t, cpu);
  2010. if (new_cpu == -1 || new_cpu == cpu) {
  2011. /* Now try balancing at a lower domain level of cpu */
  2012. sd = sd->child;
  2013. continue;
  2014. }
  2015. /* Now try balancing at a lower domain level of new_cpu */
  2016. cpu = new_cpu;
  2017. weight = cpumask_weight(sched_domain_span(sd));
  2018. sd = NULL;
  2019. for_each_domain(cpu, tmp) {
  2020. if (weight <= cpumask_weight(sched_domain_span(tmp)))
  2021. break;
  2022. if (tmp->flags & flag)
  2023. sd = tmp;
  2024. }
  2025. /* while loop will break here if sd == NULL */
  2026. }
  2027. return cpu;
  2028. }
  2029. #endif /* CONFIG_SMP */
  2030. /**
  2031. * task_oncpu_function_call - call a function on the cpu on which a task runs
  2032. * @p: the task to evaluate
  2033. * @func: the function to be called
  2034. * @info: the function call argument
  2035. *
  2036. * Calls the function @func when the task is currently running. This might
  2037. * be on the current CPU, which just calls the function directly.
  2038. */
  2039. void task_oncpu_function_call(struct task_struct *p,
  2040. void (*func) (void *info), void *info)
  2041. {
  2042. int cpu;
  2043. preempt_disable();
  2044. cpu = task_cpu(p);
  2045. if (task_curr(p))
  2046. smp_call_function_single(cpu, func, info, 1);
  2047. preempt_enable();
  2048. }
/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
	int cpu, orig_cpu, this_cpu, success = 0;
	unsigned long flags;
	long old_state;
	struct rq *rq;

	if (!sched_feat(SYNC_WAKEUPS))
		sync = 0;

#ifdef CONFIG_SMP
	if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
		struct sched_domain *sd;

		this_cpu = raw_smp_processor_id();
		cpu = task_cpu(p);

		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				update_shares(sd);
				break;
			}
		}
	}
#endif

	smp_wmb();
	rq = task_rq_lock(p, &flags);
	update_rq_clock(rq);
	old_state = p->state;
	if (!(old_state & state))
		goto out;

	if (p->se.on_rq)
		goto out_running;

	cpu = task_cpu(p);
	orig_cpu = cpu;
	this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	cpu = p->sched_class->select_task_rq(p, sync);
	if (cpu != orig_cpu) {
		set_task_cpu(p, cpu);
		task_rq_unlock(rq, &flags);
		/* might preempt at this point */
		rq = task_rq_lock(p, &flags);
		old_state = p->state;
		if (!(old_state & state))
			goto out;
		if (p->se.on_rq)
			goto out_running;

		this_cpu = smp_processor_id();
		cpu = task_cpu(p);
	}

#ifdef CONFIG_SCHEDSTATS
	schedstat_inc(rq, ttwu_count);
	if (cpu == this_cpu)
		schedstat_inc(rq, ttwu_local);
	else {
		struct sched_domain *sd;
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
	}
#endif /* CONFIG_SCHEDSTATS */

out_activate:
#endif /* CONFIG_SMP */
	schedstat_inc(p, se.nr_wakeups);
	if (sync)
		schedstat_inc(p, se.nr_wakeups_sync);
	if (orig_cpu != cpu)
		schedstat_inc(p, se.nr_wakeups_migrate);
	if (cpu == this_cpu)
		schedstat_inc(p, se.nr_wakeups_local);
	else
		schedstat_inc(p, se.nr_wakeups_remote);
	activate_task(rq, p, 1);
	success = 1;

	/*
	 * Only attribute actual wakeups done by this task.
	 */
	if (!in_interrupt()) {
		struct sched_entity *se = &current->se;
		u64 sample = se->sum_exec_runtime;

		if (se->last_wakeup)
			sample -= se->last_wakeup;
		else
			sample -= se->start_runtime;
		update_avg(&se->avg_wakeup, sample);

		se->last_wakeup = se->sum_exec_runtime;
	}

out_running:
	trace_sched_wakeup(rq, p, success);
	check_preempt_curr(rq, p, sync);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_wake_up)
		p->sched_class->task_wake_up(rq, p);
#endif
out:
	task_rq_unlock(rq, &flags);

	return success;
}
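/*
 * Rough sketch of the avg_wakeup bookkeeping above: each time a task
 * performs a wakeup from process context, 'sample' is the runtime the
 * waker accumulated since its previous wakeup (or since it started
 * running), and update_avg() folds that into a decaying average.  A
 * small avg_wakeup therefore marks a wakeup-intensive task, which the
 * wakeup placement heuristics can treat differently.
 */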
/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.  Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.last_wakeup = 0;
	p->se.avg_overlap = 0;
	p->se.start_runtime = 0;
	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;

#ifdef CONFIG_SCHEDSTATS
	p->se.wait_start = 0;
	p->se.wait_max = 0;
	p->se.wait_count = 0;
	p->se.wait_sum = 0;
	p->se.sleep_start = 0;
	p->se.sleep_max = 0;
	p->se.sum_sleep_runtime = 0;
	p->se.block_start = 0;
	p->se.block_max = 0;
	p->se.exec_max = 0;
	p->se.slice_max = 0;
	p->se.nr_migrations_cold = 0;
	p->se.nr_failed_migrations_affine = 0;
	p->se.nr_failed_migrations_running = 0;
	p->se.nr_failed_migrations_hot = 0;
	p->se.nr_forced_migrations = 0;
	p->se.nr_forced2_migrations = 0;
	p->se.nr_wakeups = 0;
	p->se.nr_wakeups_sync = 0;
	p->se.nr_wakeups_migrate = 0;
	p->se.nr_wakeups_local = 0;
	p->se.nr_wakeups_remote = 0;
	p->se.nr_wakeups_affine = 0;
	p->se.nr_wakeups_affine_attempts = 0;
	p->se.nr_wakeups_passive = 0;
	p->se.nr_wakeups_idle = 0;
#endif

	INIT_LIST_HEAD(&p->rt.run_list);
	p->se.on_rq = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

	/*
	 * We mark the process as running here, but have not actually
	 * inserted it onto the runqueue yet. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;
}
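/*
 * Note that avg_wakeup above is seeded with
 * sysctl_sched_wakeup_granularity rather than 0, plausibly so that a
 * freshly forked task starts from a neutral estimate instead of
 * looking like an extreme waker before it has produced any samples.
 */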
/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p, int clone_flags)
{
	int cpu = get_cpu();

	__sched_fork(p);

#ifdef CONFIG_SMP
	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif
	set_task_cpu(p, cpu);

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
			p->policy = SCHED_NORMAL;

		if (p->normal_prio < DEFAULT_PRIO)
			p->prio = DEFAULT_PRIO;

		if (PRIO_TO_NICE(p->static_prio) < 0) {
			p->static_prio = NICE_TO_PRIO(0);
			set_load_weight(p);
		}

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
	plist_node_init(&p->pushable_tasks, MAX_PRIO);

	put_cpu();
}
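/*
 * Worked example of the sched_reset_on_fork path above: a SCHED_FIFO
 * parent with a boosted priority and a negative nice that set the flag
 * forks a child which comes out SCHED_NORMAL, at DEFAULT_PRIO, with
 * its static priority reset to nice 0, its load weight recomputed,
 * and the flag cleared again for the child.
 */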
/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	BUG_ON(p->state != TASK_RUNNING);
	update_rq_clock(rq);

	p->prio = effective_prio(p);

	if (!p->sched_class->task_new || !current->se.on_rq) {
		activate_task(rq, p, 0);
	} else {
		/*
		 * Let the scheduling class do new task startup
		 * management (if any):
		 */
		p->sched_class->task_new(rq, p);
		inc_nr_running(rq);
	}
	trace_sched_wakeup_new(rq, p, 1);
	check_preempt_curr(rq, p, 0);
#ifdef CONFIG_SMP
	if (p->sched_class->task_wake_up)
		p->sched_class->task_wake_up(rq, p);
#endif
	task_rq_unlock(rq, &flags);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
	perf_counter_task_sched_in(current, cpu_of(rq));
	finish_lock_switch(rq, prev);

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}
}
#ifdef CONFIG_SMP

/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}

#else

static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}

#endif
/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);
	trace_sched_switch(rq, prev, next);
	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (unlikely(!mm)) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (unlikely(!prev->mm)) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * The runqueue lock will be released by the next task (which is
	 * an invalid locking op, but an obvious special case for the
	 * scheduler), so we do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
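/*
 * Sketch of the lazy-mm handling above: a kernel thread has no mm of
 * its own (next->mm == NULL), so it borrows the previous task's
 * active_mm and merely enters lazy TLB mode instead of switching page
 * tables.  The reference taken with atomic_inc() is parked in
 * rq->prev_mm when the borrower switches away, and dropped later via
 * mmdrop() in finish_task_switch(), outside the runqueue lock.
 */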
/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_uninterruptible;

	/*
	 * Since we read the counters lockless, it might be slightly
	 * inaccurate. Do not allow it to go below zero though:
	 */
	if (unlikely((long)sum < 0))
		sum = 0;

	return sum;
}

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);

/**
 * get_avenrun - get the load average array
 * @loads: pointer to dest load array
 * @offset: offset to add
 * @shift: shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}

static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}
/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(void)
{
	unsigned long upd = calc_load_update + 10;
	long active;

	if (time_before(jiffies, upd))
		return;

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;
}
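/*
 * Worked example of calc_load() above, using the fixed-point constants
 * from <linux/sched.h> (FSHIFT = 11, FIXED_1 = 1 << 11 = 2048,
 * EXP_1 = 1884): the 1-minute average decays as
 *
 *	load = (load * 1884 + active * (2048 - 1884)) >> 11
 *
 * so each LOAD_FREQ interval (about 5 seconds) keeps roughly 92%
 * (1884/2048) of the old estimate and blends in about 8% of the
 * current count of runnable plus uninterruptible tasks.
 */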
/*
 * Either called from update_cpu_load() or from a cpu going idle
 */
static void calc_load_account_active(struct rq *this_rq)
{
	long nr_active, delta;

	nr_active = this_rq->nr_running;
	nr_active += (long) this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
		atomic_long_add(delta, &calc_load_tasks);
	}
}

/*
 * Externally visible per-cpu scheduler statistics:
 * cpu_nr_migrations(cpu) - number of migrations into that cpu
 */
u64 cpu_nr_migrations(int cpu)
{
	return cpu_rq(cpu)->nr_migrations_in;
}
/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC).
 */
static void update_cpu_load(struct rq *this_rq)
{
	unsigned long this_load = this_rq->load.weight;
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale-1;
		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
	}

	if (time_after_eq(jiffies, this_rq->calc_load_update)) {
		this_rq->calc_load_update += LOAD_FREQ;
		calc_load_account_active(this_rq);
	}
}
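/*
 * Worked example of the averaging above: for index i the update is
 *
 *	cpu_load[i] = (old * (2^i - 1) + new) / 2^i
 *
 * so cpu_load[0] tracks the instantaneous weight, cpu_load[1] mixes
 * half old and half new each tick, and higher indexes respond ever
 * more slowly.  E.g. with i = 2, old = 8, new = 16:
 * (8*3 + 16 + 3) >> 2 = 10, the "+ 3" being the round-up applied when
 * load is increasing.
 */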
#ifdef CONFIG_SMP

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			spin_lock(&rq1->lock);
			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(&rq2->lock);
			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
	update_rq_clock(rq1);
	update_rq_clock(rq2);
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
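/*
 * The address-ordered locking in double_rq_lock() is what prevents an
 * ABBA deadlock: if one CPU runs double_rq_lock(rqA, rqB) while
 * another runs double_rq_lock(rqB, rqA), both sides still take the
 * lower-addressed runqueue lock first, so one of them simply waits
 * instead of each holding the lock the other needs.
 */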
/*
 * If dest_cpu is allowed for this process, migrate the task to it.
 * This is accomplished by forcing the cpu_allowed mask to only
 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
 * the cpu_allowed mask is restored.
 */
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
{
	struct migration_req req;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
	    || unlikely(!cpu_active(dest_cpu)))
		goto out;

	/* force the process onto the specified CPU */
	if (migrate_task(p, dest_cpu, &req)) {
		/* Need to wait for migration thread (might exit: take ref). */
		struct task_struct *mt = rq->migration_thread;

		get_task_struct(mt);
		task_rq_unlock(rq, &flags);
		wake_up_process(mt);
		put_task_struct(mt);
		wait_for_completion(&req.done);

		return;
	}
out:
	task_rq_unlock(rq, &flags);
}

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	int new_cpu, this_cpu = get_cpu();

	new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
	put_cpu();
	if (new_cpu != this_cpu)
		sched_migrate_task(current, new_cpu);
}
/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	/*
	 * Note that idle threads have a prio of MAX_PRIO, so this test
	 * is always true for them.
	 */
	check_preempt_curr(this_rq, p, 0);
}

/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		     struct sched_domain *sd, enum cpu_idle_type idle,
		     int *all_pinned)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.nr_failed_migrations_affine);
		return 0;
	}
	*all_pinned = 0;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, rq->clock, sd);
	if (!tsk_cache_hot ||
	    sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      unsigned long max_load_move, struct sched_domain *sd,
	      enum cpu_idle_type idle, int *all_pinned,
	      int *this_best_prio, struct rq_iterator *iterator)
{
	int loops = 0, pulled = 0, pinned = 0;
	struct task_struct *p;
	long rem_load_move = max_load_move;

	if (max_load_move == 0)
		goto out;

	pinned = 1;

	/*
	 * Start the load-balancing iterator:
	 */
	p = iterator->start(iterator->arg);
next:
	if (!p || loops++ > sysctl_sched_nr_migrate)
		goto out;

	if ((p->se.load.weight >> 1) > rem_load_move ||
	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
		p = iterator->next(iterator->arg);
		goto next;
	}

	pull_task(busiest, p, this_rq, this_cpu);
	pulled++;
	rem_load_move -= p->se.load.weight;

#ifdef CONFIG_PREEMPT
	/*
	 * NEWIDLE balancing is a source of latency, so preemptible kernels
	 * will stop after the first task is pulled to minimize the critical
	 * section.
	 */
	if (idle == CPU_NEWLY_IDLE)
		goto out;
#endif

	/*
	 * We only want to steal up to the prescribed amount of weighted load.
	 */
	if (rem_load_move > 0) {
		if (p->prio < *this_best_prio)
			*this_best_prio = p->prio;
		p = iterator->next(iterator->arg);
		goto next;
	}
out:
	/*
	 * Right now, this is one of only two places pull_task() is called,
	 * so we can safely collect pull_task() stats here rather than
	 * inside pull_task().
	 */
	schedstat_add(sd, lb_gained[idle], pulled);

	if (all_pinned)
		*all_pinned = pinned;

	return max_load_move - rem_load_move;
}
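/*
 * Two throttles bound the loop above: sysctl_sched_nr_migrate (32 by
 * default) caps how many tasks a single balancing pass may examine,
 * and the (weight >> 1) > rem_load_move test skips a task once even
 * half its weight would exceed the remaining load budget, so one
 * heavy task cannot grossly overshoot the requested move.
 */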
/*
 * move_tasks tries to move up to max_load_move weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *all_pinned)
{
	const struct sched_class *class = sched_class_highest;
	unsigned long total_load_moved = 0;
	int this_best_prio = this_rq->curr->prio;

	do {
		total_load_moved +=
			class->load_balance(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, all_pinned, &this_best_prio);
		class = class->next;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to
		 * minimize the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
			break;
#endif
	} while (class && max_load_move > total_load_moved);

	return total_load_moved > 0;
}

static int
iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle,
		   struct rq_iterator *iterator)
{
	struct task_struct *p = iterator->start(iterator->arg);
	int pinned = 0;

	while (p) {
		if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);

			return 1;
		}
		p = iterator->next(iterator->arg);
	}

	return 0;
}

/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
			 struct sched_domain *sd, enum cpu_idle_type idle)
{
	const struct sched_class *class;

	for_each_class(class) {
		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
			return 1;
	}

	return 0;
}
/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 * during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest; /* Busiest group in this sd */
	struct sched_group *this; /* Local group in this sd */
	unsigned long total_load; /* Total load of all groups in sd */
	unsigned long total_pwr; /* Total power of all groups in sd */
	unsigned long avg_load; /* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;

	/* Statistics of the busiest group */
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;

	int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	int power_savings_balance; /* Is powersave balance needed for this sd */
	struct sched_group *group_min; /* Least loaded group in sd */
	struct sched_group *group_leader; /* Group which relieves group_min */
	unsigned long min_load_per_task; /* load_per_task in group_min */
	unsigned long leader_nr_running; /* Nr running of group_leader */
	unsigned long min_nr_running; /* Nr running of group_min */
#endif
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
	int group_imb; /* Is there an imbalance in the group ? */
};

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
				  enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * init_sd_power_savings_stats - Initialize power savings statistics for
 * the given sched_domain, during load balancing.
 *
 * @sd: Sched domain whose power-savings statistics are to be initialized.
 * @sds: Variable containing the statistics for sd.
 * @idle: Idle status of the CPU at which we're performing load-balancing.
 */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	/*
	 * Busy processors will not participate in power savings
	 * balance.
	 */
	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
		sds->power_savings_balance = 0;
	else {
		sds->power_savings_balance = 1;
		sds->min_nr_running = ULONG_MAX;
		sds->leader_nr_running = 0;
	}
}

/**
 * update_sd_power_savings_stats - Update the power saving stats for a
 * sched_domain while performing load balancing.
 *
 * @group: sched_group belonging to the sched_domain under consideration.
 * @sds: Variable containing the statistics of the sched_domain
 * @local_group: Does group contain the CPU for which we're performing
 * load balancing?
 * @sgs: Variable containing the statistics of the group.
 */
static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{
	if (!sds->power_savings_balance)
		return;

	/*
	 * If the local group is idle or completely loaded
	 * no need to do power savings balance at this domain
	 */
	if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
				!sds->this_nr_running))
		sds->power_savings_balance = 0;

	/*
	 * If a group is already running at full capacity or idle,
	 * don't include that group in power savings calculations
	 */
	if (!sds->power_savings_balance ||
		sgs->sum_nr_running >= sgs->group_capacity ||
		!sgs->sum_nr_running)
		return;

	/*
	 * Calculate the group which has the least non-idle load.
	 * This is the group from where we need to pick up the load
	 * for saving power
	 */
	if ((sgs->sum_nr_running < sds->min_nr_running) ||
	    (sgs->sum_nr_running == sds->min_nr_running &&
	     group_first_cpu(group) > group_first_cpu(sds->group_min))) {
		sds->group_min = group;
		sds->min_nr_running = sgs->sum_nr_running;
		sds->min_load_per_task = sgs->sum_weighted_load /
						sgs->sum_nr_running;
	}

	/*
	 * Calculate the group which is almost near its
	 * capacity but still has some space to pick up some load
	 * from other group and save more power
	 */
	if (sgs->sum_nr_running > sgs->group_capacity - 1)
		return;

	if (sgs->sum_nr_running > sds->leader_nr_running ||
	    (sgs->sum_nr_running == sds->leader_nr_running &&
	     group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
		sds->group_leader = group;
		sds->leader_nr_running = sgs->sum_nr_running;
	}
}

/**
 * check_power_save_busiest_group - see if there is potential for some power-savings balance
 * @sds: Variable containing the statistics of the sched_domain
 *	under consideration.
 * @this_cpu: Cpu at which we're currently performing load-balancing.
 * @imbalance: Variable to store the imbalance.
 *
 * Description:
 * Check if we have potential to perform some power-savings balance.
 * If yes, set the busiest group to be the least loaded group in the
 * sched_domain, so that its CPUs can be put to idle.
 *
 * Returns 1 if there is potential to perform power-savings balance.
 * Else returns 0.
 */
static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	if (!sds->power_savings_balance)
		return 0;

	if (sds->this != sds->group_leader ||
			sds->group_leader == sds->group_min)
		return 0;

	*imbalance = sds->min_load_per_task;
	sds->busiest = sds->group_min;

	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
			group_first_cpu(sds->group_leader);
	}

	return 1;
}
#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	return;
}

static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{
	return;
}

static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	return 0;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @group: sched_group whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @sd_idle: Idle status of the sched_domain containing group.
 * @local_group: Does group contain this_cpu.
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sgs: variable to hold the statistics for this group.
 */
static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
			enum cpu_idle_type idle, int load_idx, int *sd_idle,
			int local_group, const struct cpumask *cpus,
			int *balance, struct sg_lb_stats *sgs)
{
	unsigned long load, max_cpu_load, min_cpu_load;
	int i;
	unsigned int balance_cpu = -1, first_idle_cpu = 0;
	unsigned long sum_avg_load_per_task;
	unsigned long avg_load_per_task;

	if (local_group)
		balance_cpu = group_first_cpu(group);

	/* Tally up the load of all CPUs in the group */
	sum_avg_load_per_task = avg_load_per_task = 0;
	max_cpu_load = 0;
	min_cpu_load = ~0UL;

	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
		struct rq *rq = cpu_rq(i);

		if (*sd_idle && rq->nr_running)
			*sd_idle = 0;

		/* Bias balancing toward cpus of our domain */
		if (local_group) {
			if (idle_cpu(i) && !first_idle_cpu) {
				first_idle_cpu = 1;
				balance_cpu = i;
			}

			load = target_load(i, load_idx);
		} else {
			load = source_load(i, load_idx);
			if (load > max_cpu_load)
				max_cpu_load = load;
			if (min_cpu_load > load)
				min_cpu_load = load;
		}

		sgs->group_load += load;
		sgs->sum_nr_running += rq->nr_running;
		sgs->sum_weighted_load += weighted_cpuload(i);

		sum_avg_load_per_task += cpu_avg_load_per_task(i);
	}

	/*
	 * First idle cpu or the first cpu(busiest) in this sched group
	 * is eligible for doing load balancing at this and above
	 * domains. In the newly idle case, we will allow all the cpu's
	 * to do the newly idle load balance.
	 */
	if (idle != CPU_NEWLY_IDLE && local_group &&
	    balance_cpu != this_cpu && balance) {
		*balance = 0;
		return;
	}

	/* Adjust by relative CPU power of the group */
	sgs->avg_load = sg_div_cpu_power(group,
			sgs->group_load * SCHED_LOAD_SCALE);

	/*
	 * Consider the group unbalanced when the imbalance is larger
	 * than the average weight of two tasks.
	 *
	 * APZ: with cgroup the avg task weight can vary wildly and
	 * might not be a suitable number - should we keep a
	 * normalized nr_running number somewhere that negates
	 * the hierarchy?
	 */
	avg_load_per_task = sg_div_cpu_power(group,
			sum_avg_load_per_task * SCHED_LOAD_SCALE);

	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
		sgs->group_imb = 1;

	sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
}
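/*
 * Illustrative reading of the group_imb test above: a group is marked
 * internally imbalanced when the spread between its most and least
 * loaded CPUs exceeds twice the average task weight, e.g. one CPU at
 * load 3072 and another at 0 with avg_load_per_task = 1024 qualifies,
 * since 3072 - 0 > 2 * 1024.
 */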
/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @sd: sched_domain whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @sd_idle: Idle status of the sched_domain containing group.
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
			enum cpu_idle_type idle, int *sd_idle,
			const struct cpumask *cpus, int *balance,
			struct sd_lb_stats *sds)
{
	struct sched_group *group = sd->groups;
	struct sg_lb_stats sgs;
	int load_idx;

	init_sd_power_savings_stats(sd, sds, idle);
	load_idx = get_sd_load_idx(sd, idle);

	do {
		int local_group;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));
		memset(&sgs, 0, sizeof(sgs));
		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
				local_group, cpus, balance, &sgs);

		if (local_group && balance && !(*balance))
			return;

		sds->total_load += sgs.group_load;
		sds->total_pwr += group->__cpu_power;

		if (local_group) {
			sds->this_load = sgs.avg_load;
			sds->this = group;
			sds->this_nr_running = sgs.sum_nr_running;
			sds->this_load_per_task = sgs.sum_weighted_load;
		} else if (sgs.avg_load > sds->max_load &&
			   (sgs.sum_nr_running > sgs.group_capacity ||
				sgs.group_imb)) {
			sds->max_load = sgs.avg_load;
			sds->busiest = group;
			sds->busiest_nr_running = sgs.sum_nr_running;
			sds->busiest_load_per_task = sgs.sum_weighted_load;
			sds->group_imb = sgs.group_imb;
		}

		update_sd_power_savings_stats(group, sds, local_group, &sgs);
		group = group->next;
	} while (group != sd->groups);
}
/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 * amongst the groups of a sched_domain, during load balancing.
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 * @imbalance: Variable to store the imbalance.
 */
static inline void fix_small_imbalance(struct sd_lb_stats *sds,
				int this_cpu, unsigned long *imbalance)
{
	unsigned long tmp, pwr_now = 0, pwr_move = 0;
	unsigned int imbn = 2;

	if (sds->this_nr_running) {
		sds->this_load_per_task /= sds->this_nr_running;
		if (sds->busiest_load_per_task >
				sds->this_load_per_task)
			imbn = 1;
	} else
		sds->this_load_per_task =
			cpu_avg_load_per_task(this_cpu);

	if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
			sds->busiest_load_per_task * imbn) {
		*imbalance = sds->busiest_load_per_task;
		return;
	}

	/*
	 * OK, we don't have enough imbalance to justify moving tasks,
	 * however we may be able to increase total CPU power used by
	 * moving them.
	 */

	pwr_now += sds->busiest->__cpu_power *
			min(sds->busiest_load_per_task, sds->max_load);
	pwr_now += sds->this->__cpu_power *
			min(sds->this_load_per_task, sds->this_load);
	pwr_now /= SCHED_LOAD_SCALE;

	/* Amount of load we'd subtract */
	tmp = sg_div_cpu_power(sds->busiest,
			sds->busiest_load_per_task * SCHED_LOAD_SCALE);
	if (sds->max_load > tmp)
		pwr_move += sds->busiest->__cpu_power *
			min(sds->busiest_load_per_task, sds->max_load - tmp);

	/* Amount of load we'd add */
	if (sds->max_load * sds->busiest->__cpu_power <
		sds->busiest_load_per_task * SCHED_LOAD_SCALE)
		tmp = sg_div_cpu_power(sds->this,
			sds->max_load * sds->busiest->__cpu_power);
	else
		tmp = sg_div_cpu_power(sds->this,
			sds->busiest_load_per_task * SCHED_LOAD_SCALE);
	pwr_move += sds->this->__cpu_power *
			min(sds->this_load_per_task, sds->this_load + tmp);
	pwr_move /= SCHED_LOAD_SCALE;

	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
		*imbalance = sds->busiest_load_per_task;
}
/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 * groups of a given sched_domain during load balance.
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: Cpu for which currently load balance is being performed.
 * @imbalance: The variable to store the imbalance.
 */
static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
		unsigned long *imbalance)
{
	unsigned long max_pull;
	/*
	 * In the presence of smp nice balancing, certain scenarios can have
	 * max load less than avg load (as we skip the groups at or below
	 * its cpu_power, while calculating max_load..)
	 */
	if (sds->max_load < sds->avg_load) {
		*imbalance = 0;
		return fix_small_imbalance(sds, this_cpu, imbalance);
	}

	/* Don't want to pull so many tasks that a group would go idle */
	max_pull = min(sds->max_load - sds->avg_load,
			sds->max_load - sds->busiest_load_per_task);

	/* How much load to actually move to equalise the imbalance */
	*imbalance = min(max_pull * sds->busiest->__cpu_power,
		(sds->avg_load - sds->this_load) * sds->this->__cpu_power)
			/ SCHED_LOAD_SCALE;

	/*
	 * If *imbalance is less than the average load per runnable task
	 * there is no guarantee that any tasks will be moved, so we may
	 * need to bump its value to force at least one task to be moved.
	 */
	if (*imbalance < sds->busiest_load_per_task)
		return fix_small_imbalance(sds, this_cpu, imbalance);

}
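/*
 * Numeric sketch of the computation above, assuming all groups at unit
 * cpu_power (__cpu_power == SCHED_LOAD_SCALE): with max_load = 3000,
 * this_load = 1000, avg_load = 2000 and busiest_load_per_task = 800,
 * max_pull = min(3000-2000, 3000-800) = 1000 and *imbalance =
 * min(1000, 2000-1000) = 1000, i.e. pull just enough to reach the
 * average without dragging the busiest group below it.
 */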
/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance. If there isn't an imbalance, and
 * the user has opted for power-savings, it returns a group whose
 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
 * such a group exists.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
 * @sd: The sched_domain whose busiest group is to be returned.
 * @this_cpu: The cpu for which load balancing is currently being performed.
 * @imbalance: Variable which stores amount of weighted load which should
 *	be moved to restore balance/put a group to idle.
 * @idle: The idle status of this_cpu.
 * @sd_idle: The idleness of sd
 * @cpus: The set of CPUs under consideration for load-balancing.
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
 * Returns:	- the busiest group if imbalance exists.
 *		- If no imbalance and user has opted for power-savings balance,
 *		  return the least loaded group whose CPUs can be
 *		  put to idle by rebalancing its tasks onto our group.
 */
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   int *sd_idle, const struct cpumask *cpus, int *balance)
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
					balance, &sds);

	/* Cases where imbalance does not exist from POV of this_cpu:
	 * 1) this_cpu is not the appropriate cpu to perform load balancing
	 *    at this level.
	 * 2) There is no busy sibling group to pull from.
	 * 3) This group is the busiest group.
	 * 4) This group is busier than the average busyness at this
	 *    sched_domain.
	 * 5) The imbalance is within the specified limit.
	 * 6) Any rebalance would lead to ping-pong.
	 */
	if (balance && !(*balance))
		goto ret;

	if (!sds.busiest || sds.busiest_nr_running == 0)
		goto out_balanced;

	if (sds.this_load >= sds.max_load)
		goto out_balanced;

	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;

	if (sds.this_load >= sds.avg_load)
		goto out_balanced;

	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
		goto out_balanced;

	sds.busiest_load_per_task /= sds.busiest_nr_running;
	if (sds.group_imb)
		sds.busiest_load_per_task =
			min(sds.busiest_load_per_task, sds.avg_load);

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load, as either of these
	 * actions would just result in more rebalancing later, and ping-pong
	 * tasks around. Thus we look for the minimum possible imbalance.
	 * Negative imbalances (*we* are more loaded than anyone else) will
	 * be counted as no imbalance for these purposes -- we can't fix that
	 * by pulling tasks to us. Be careful of negative numbers as they'll
	 * appear as very large values with unsigned longs.
	 */
	if (sds.max_load <= sds.busiest_load_per_task)
		goto out_balanced;

	/* Looks like there is an imbalance. Compute it */
	calculate_imbalance(&sds, this_cpu, imbalance);
	return sds.busiest;

out_balanced:
	/*
	 * There is no obvious imbalance. But check if we can do some balancing
	 * to save power.
	 */
	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
		return sds.busiest;
ret:
	*imbalance = 0;
	return NULL;
}
/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *
find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
		   unsigned long imbalance, const struct cpumask *cpus)
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
	int i;

	for_each_cpu(i, sched_group_cpus(group)) {
		unsigned long wl;

		if (!cpumask_test_cpu(i, cpus))
			continue;

		rq = cpu_rq(i);
		wl = weighted_cpuload(i);

		if (rq->nr_running == 1 && wl > imbalance)
			continue;

		if (wl > max_load) {
			max_load = wl;
			busiest = rq;
		}
	}

	return busiest;
}

/*
 * Max backoff if we encounter pinned tasks. The value is fairly
 * arbitrary; it does not matter much as long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

/* Working cpumask for load_balance and load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
  3490. /*
  3491. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  3492. * tasks if there is an imbalance.
  3493. */
  3494. static int load_balance(int this_cpu, struct rq *this_rq,
  3495. struct sched_domain *sd, enum cpu_idle_type idle,
  3496. int *balance)
  3497. {
  3498. int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
  3499. struct sched_group *group;
  3500. unsigned long imbalance;
  3501. struct rq *busiest;
  3502. unsigned long flags;
  3503. struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  3504. cpumask_setall(cpus);
  3505. /*
  3506. * When power savings policy is enabled for the parent domain, idle
  3507. * sibling can pick up load irrespective of busy siblings. In this case,
  3508. * let the state of idle sibling percolate up as CPU_IDLE, instead of
  3509. * portraying it as CPU_NOT_IDLE.
  3510. */
  3511. if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
  3512. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3513. sd_idle = 1;
  3514. schedstat_inc(sd, lb_count[idle]);
  3515. redo:
  3516. update_shares(sd);
  3517. group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
  3518. cpus, balance);
  3519. if (*balance == 0)
  3520. goto out_balanced;
  3521. if (!group) {
  3522. schedstat_inc(sd, lb_nobusyg[idle]);
  3523. goto out_balanced;
  3524. }
  3525. busiest = find_busiest_queue(group, idle, imbalance, cpus);
  3526. if (!busiest) {
  3527. schedstat_inc(sd, lb_nobusyq[idle]);
  3528. goto out_balanced;
  3529. }
  3530. BUG_ON(busiest == this_rq);
  3531. schedstat_add(sd, lb_imbalance[idle], imbalance);
  3532. ld_moved = 0;
  3533. if (busiest->nr_running > 1) {
  3534. /*
  3535. * Attempt to move tasks. If find_busiest_group has found
  3536. * an imbalance but busiest->nr_running <= 1, the group is
  3537. * still unbalanced. ld_moved simply stays zero, so it is
  3538. * correctly treated as an imbalance.
  3539. */
  3540. local_irq_save(flags);
  3541. double_rq_lock(this_rq, busiest);
  3542. ld_moved = move_tasks(this_rq, this_cpu, busiest,
  3543. imbalance, sd, idle, &all_pinned);
  3544. double_rq_unlock(this_rq, busiest);
  3545. local_irq_restore(flags);
  3546. /*
  3547. * some other cpu did the load balance for us.
  3548. */
  3549. if (ld_moved && this_cpu != smp_processor_id())
  3550. resched_cpu(this_cpu);
  3551. /* All tasks on this runqueue were pinned by CPU affinity */
  3552. if (unlikely(all_pinned)) {
  3553. cpumask_clear_cpu(cpu_of(busiest), cpus);
  3554. if (!cpumask_empty(cpus))
  3555. goto redo;
  3556. goto out_balanced;
  3557. }
  3558. }
  3559. if (!ld_moved) {
  3560. schedstat_inc(sd, lb_failed[idle]);
  3561. sd->nr_balance_failed++;
  3562. if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
  3563. spin_lock_irqsave(&busiest->lock, flags);
3564. /* don't kick the migration_thread if the curr
3565. * task on the busiest cpu can't be moved to this_cpu
3566. */
  3567. if (!cpumask_test_cpu(this_cpu,
  3568. &busiest->curr->cpus_allowed)) {
  3569. spin_unlock_irqrestore(&busiest->lock, flags);
  3570. all_pinned = 1;
  3571. goto out_one_pinned;
  3572. }
  3573. if (!busiest->active_balance) {
  3574. busiest->active_balance = 1;
  3575. busiest->push_cpu = this_cpu;
  3576. active_balance = 1;
  3577. }
  3578. spin_unlock_irqrestore(&busiest->lock, flags);
  3579. if (active_balance)
  3580. wake_up_process(busiest->migration_thread);
  3581. /*
  3582. * We've kicked active balancing, reset the failure
  3583. * counter.
  3584. */
  3585. sd->nr_balance_failed = sd->cache_nice_tries+1;
  3586. }
  3587. } else
  3588. sd->nr_balance_failed = 0;
  3589. if (likely(!active_balance)) {
  3590. /* We were unbalanced, so reset the balancing interval */
  3591. sd->balance_interval = sd->min_interval;
  3592. } else {
  3593. /*
  3594. * If we've begun active balancing, start to back off. This
  3595. * case may not be covered by the all_pinned logic if there
  3596. * is only 1 task on the busy runqueue (because we don't call
  3597. * move_tasks).
  3598. */
  3599. if (sd->balance_interval < sd->max_interval)
  3600. sd->balance_interval *= 2;
  3601. }
  3602. if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  3603. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3604. ld_moved = -1;
  3605. goto out;
  3606. out_balanced:
  3607. schedstat_inc(sd, lb_balanced[idle]);
  3608. sd->nr_balance_failed = 0;
  3609. out_one_pinned:
  3610. /* tune up the balancing interval */
  3611. if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
  3612. (sd->balance_interval < sd->max_interval))
  3613. sd->balance_interval *= 2;
  3614. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  3615. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3616. ld_moved = -1;
  3617. else
  3618. ld_moved = 0;
  3619. out:
  3620. if (ld_moved)
  3621. update_shares(sd);
  3622. return ld_moved;
  3623. }
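/*
 * Illustrative sketch, not part of the original file: the interval backoff
 * applied above. Failed or pinned balance attempts double
 * sd->balance_interval so that hopeless domains are rescanned less often;
 * this clamped variant of the same arithmetic is a hypothetical helper.
 */
static unsigned long example_backoff_interval(unsigned long interval,
					      unsigned long max_interval)
{
	/* Double after each failure... */
	interval *= 2;
	/* ...but never grow beyond the domain's configured ceiling. */
	if (interval > max_interval)
		interval = max_interval;
	return interval;
}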
  3624. /*
  3625. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  3626. * tasks if there is an imbalance.
  3627. *
  3628. * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
  3629. * this_rq is locked.
  3630. */
  3631. static int
  3632. load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
  3633. {
  3634. struct sched_group *group;
  3635. struct rq *busiest = NULL;
  3636. unsigned long imbalance;
  3637. int ld_moved = 0;
  3638. int sd_idle = 0;
  3639. int all_pinned = 0;
  3640. struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  3641. cpumask_setall(cpus);
  3642. /*
  3643. * When power savings policy is enabled for the parent domain, idle
  3644. * sibling can pick up load irrespective of busy siblings. In this case,
3645. * let the state of idle sibling percolate up as CPU_IDLE, instead of
  3646. * portraying it as CPU_NOT_IDLE.
  3647. */
  3648. if (sd->flags & SD_SHARE_CPUPOWER &&
  3649. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3650. sd_idle = 1;
  3651. schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
  3652. redo:
  3653. update_shares_locked(this_rq, sd);
  3654. group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
  3655. &sd_idle, cpus, NULL);
  3656. if (!group) {
  3657. schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
  3658. goto out_balanced;
  3659. }
  3660. busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
  3661. if (!busiest) {
  3662. schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
  3663. goto out_balanced;
  3664. }
  3665. BUG_ON(busiest == this_rq);
  3666. schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
  3667. ld_moved = 0;
  3668. if (busiest->nr_running > 1) {
  3669. /* Attempt to move tasks */
  3670. double_lock_balance(this_rq, busiest);
  3671. /* this_rq->clock is already updated */
  3672. update_rq_clock(busiest);
  3673. ld_moved = move_tasks(this_rq, this_cpu, busiest,
  3674. imbalance, sd, CPU_NEWLY_IDLE,
  3675. &all_pinned);
  3676. double_unlock_balance(this_rq, busiest);
  3677. if (unlikely(all_pinned)) {
  3678. cpumask_clear_cpu(cpu_of(busiest), cpus);
  3679. if (!cpumask_empty(cpus))
  3680. goto redo;
  3681. }
  3682. }
  3683. if (!ld_moved) {
  3684. int active_balance = 0;
  3685. schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
  3686. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  3687. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3688. return -1;
  3689. if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
  3690. return -1;
  3691. if (sd->nr_balance_failed++ < 2)
  3692. return -1;
  3693. /*
3694. * The only task running on a non-idle cpu can be moved to this
3695. * cpu in an attempt to completely free up the other CPU
3696. * package. The same method used to move tasks in load_balance()
3697. * has been extended for load_balance_newidle() to speed up
3698. * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2).
  3699. *
3700. * The package power saving logic comes from
3701. * find_busiest_group(). If there is no imbalance, then
3702. * f_b_g() will return NULL. However, when sched_mc={1,2},
3703. * f_b_g() will select a group from which a running task may be
3704. * pulled to this cpu in order to make the other package idle.
3705. * If there is no opportunity to make a package idle and
3706. * there is no imbalance, then f_b_g() will return NULL and no
3707. * action will be taken in load_balance_newidle().
  3708. *
  3709. * Under normal task pull operation due to imbalance, there
  3710. * will be more than one task in the source run queue and
  3711. * move_tasks() will succeed. ld_moved will be true and this
  3712. * active balance code will not be triggered.
  3713. */
  3714. /* Lock busiest in correct order while this_rq is held */
  3715. double_lock_balance(this_rq, busiest);
  3716. /*
  3717. * don't kick the migration_thread, if the curr
  3718. * task on busiest cpu can't be moved to this_cpu
  3719. */
  3720. if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
  3721. double_unlock_balance(this_rq, busiest);
  3722. all_pinned = 1;
  3723. return ld_moved;
  3724. }
  3725. if (!busiest->active_balance) {
  3726. busiest->active_balance = 1;
  3727. busiest->push_cpu = this_cpu;
  3728. active_balance = 1;
  3729. }
  3730. double_unlock_balance(this_rq, busiest);
  3731. /*
  3732. * Should not call ttwu while holding a rq->lock
  3733. */
  3734. spin_unlock(&this_rq->lock);
  3735. if (active_balance)
  3736. wake_up_process(busiest->migration_thread);
  3737. spin_lock(&this_rq->lock);
  3738. } else
  3739. sd->nr_balance_failed = 0;
  3740. update_shares_locked(this_rq, sd);
  3741. return ld_moved;
  3742. out_balanced:
  3743. schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
  3744. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  3745. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  3746. return -1;
  3747. sd->nr_balance_failed = 0;
  3748. return 0;
  3749. }
  3750. /*
  3751. * idle_balance is called by schedule() if this_cpu is about to become
  3752. * idle. Attempts to pull tasks from other CPUs.
  3753. */
  3754. static void idle_balance(int this_cpu, struct rq *this_rq)
  3755. {
  3756. struct sched_domain *sd;
  3757. int pulled_task = 0;
  3758. unsigned long next_balance = jiffies + HZ;
  3759. for_each_domain(this_cpu, sd) {
  3760. unsigned long interval;
  3761. if (!(sd->flags & SD_LOAD_BALANCE))
  3762. continue;
  3763. if (sd->flags & SD_BALANCE_NEWIDLE)
3764. /* If we've pulled tasks over, stop searching: */
  3765. pulled_task = load_balance_newidle(this_cpu, this_rq,
  3766. sd);
  3767. interval = msecs_to_jiffies(sd->balance_interval);
  3768. if (time_after(next_balance, sd->last_balance + interval))
  3769. next_balance = sd->last_balance + interval;
  3770. if (pulled_task)
  3771. break;
  3772. }
  3773. if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
  3774. /*
  3775. * We are going idle. next_balance may be set based on
  3776. * a busy processor. So reset next_balance.
  3777. */
  3778. this_rq->next_balance = next_balance;
  3779. }
  3780. }
  3781. /*
  3782. * active_load_balance is run by migration threads. It pushes running tasks
  3783. * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
  3784. * running on each physical CPU where possible, and avoids physical /
  3785. * logical imbalances.
  3786. *
  3787. * Called with busiest_rq locked.
  3788. */
  3789. static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
  3790. {
  3791. int target_cpu = busiest_rq->push_cpu;
  3792. struct sched_domain *sd;
  3793. struct rq *target_rq;
  3794. /* Is there any task to move? */
  3795. if (busiest_rq->nr_running <= 1)
  3796. return;
  3797. target_rq = cpu_rq(target_cpu);
  3798. /*
3799. * This condition is "impossible"; if it occurs,
3800. * we need to fix it. Originally reported by
  3801. * Bjorn Helgaas on a 128-cpu setup.
  3802. */
  3803. BUG_ON(busiest_rq == target_rq);
  3804. /* move a task from busiest_rq to target_rq */
  3805. double_lock_balance(busiest_rq, target_rq);
  3806. update_rq_clock(busiest_rq);
  3807. update_rq_clock(target_rq);
  3808. /* Search for an sd spanning us and the target CPU. */
  3809. for_each_domain(target_cpu, sd) {
  3810. if ((sd->flags & SD_LOAD_BALANCE) &&
  3811. cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
  3812. break;
  3813. }
  3814. if (likely(sd)) {
  3815. schedstat_inc(sd, alb_count);
  3816. if (move_one_task(target_rq, target_cpu, busiest_rq,
  3817. sd, CPU_IDLE))
  3818. schedstat_inc(sd, alb_pushed);
  3819. else
  3820. schedstat_inc(sd, alb_failed);
  3821. }
  3822. double_unlock_balance(busiest_rq, target_rq);
  3823. }
  3824. #ifdef CONFIG_NO_HZ
  3825. static struct {
  3826. atomic_t load_balancer;
  3827. cpumask_var_t cpu_mask;
  3828. cpumask_var_t ilb_grp_nohz_mask;
  3829. } nohz ____cacheline_aligned = {
  3830. .load_balancer = ATOMIC_INIT(-1),
  3831. };
  3832. int get_nohz_load_balancer(void)
  3833. {
  3834. return atomic_read(&nohz.load_balancer);
  3835. }
  3836. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  3837. /**
  3838. * lowest_flag_domain - Return lowest sched_domain containing flag.
  3839. * @cpu: The cpu whose lowest level of sched domain is to
  3840. * be returned.
  3841. * @flag: The flag to check for the lowest sched_domain
  3842. * for the given cpu.
  3843. *
  3844. * Returns the lowest sched_domain of a cpu which contains the given flag.
  3845. */
  3846. static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
  3847. {
  3848. struct sched_domain *sd;
  3849. for_each_domain(cpu, sd)
  3850. if (sd && (sd->flags & flag))
  3851. break;
  3852. return sd;
  3853. }
  3854. /**
  3855. * for_each_flag_domain - Iterates over sched_domains containing the flag.
  3856. * @cpu: The cpu whose domains we're iterating over.
  3857. * @sd: variable holding the value of the power_savings_sd
  3858. * for cpu.
  3859. * @flag: The flag to filter the sched_domains to be iterated.
  3860. *
  3861. * Iterates over all the scheduler domains for a given cpu that has the 'flag'
  3862. * set, starting from the lowest sched_domain to the highest.
  3863. */
  3864. #define for_each_flag_domain(cpu, sd, flag) \
  3865. for (sd = lowest_flag_domain(cpu, flag); \
  3866. (sd && (sd->flags & flag)); sd = sd->parent)
  3867. /**
  3868. * is_semi_idle_group - Checks if the given sched_group is semi-idle.
  3869. * @ilb_group: group to be checked for semi-idleness
  3870. *
  3871. * Returns: 1 if the group is semi-idle. 0 otherwise.
  3872. *
3873. * We define a sched_group to be semi-idle if it has at least one idle CPU
3874. * and at least one non-idle CPU. This helper function checks if the given
3875. * sched_group is semi-idle or not.
  3876. */
  3877. static inline int is_semi_idle_group(struct sched_group *ilb_group)
  3878. {
  3879. cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
  3880. sched_group_cpus(ilb_group));
  3881. /*
3882. * A sched_group is semi-idle when it has at least one busy cpu
3883. * and at least one idle cpu.
  3884. */
  3885. if (cpumask_empty(nohz.ilb_grp_nohz_mask))
  3886. return 0;
  3887. if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
  3888. return 0;
  3889. return 1;
  3890. }
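/*
 * Illustrative sketch, not part of the original file: the semi-idle test
 * above restated on plain bitmasks. A group is semi-idle iff the set of
 * its idle (nohz) cpus is neither empty nor the whole group.
 * example_is_semi_idle is a hypothetical name.
 */
static int example_is_semi_idle(unsigned long group_mask,
				unsigned long idle_mask)
{
	unsigned long idle_in_group = group_mask & idle_mask;

	if (!idle_in_group)			/* no idle cpu at all */
		return 0;
	if (idle_in_group == group_mask)	/* every cpu is idle */
		return 0;
	return 1;				/* some busy, some idle */
}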
  3891. /**
  3892. * find_new_ilb - Finds the optimum idle load balancer for nomination.
  3893. * @cpu: The cpu which is nominating a new idle_load_balancer.
  3894. *
3895. * Returns: the id of the idle load balancer if it exists,
3896. * else, returns >= nr_cpu_ids.
  3897. *
  3898. * This algorithm picks the idle load balancer such that it belongs to a
  3899. * semi-idle powersavings sched_domain. The idea is to try and avoid
  3900. * completely idle packages/cores just for the purpose of idle load balancing
3901. * when there are other idle cpus which are better suited for that job.
  3902. */
  3903. static int find_new_ilb(int cpu)
  3904. {
  3905. struct sched_domain *sd;
  3906. struct sched_group *ilb_group;
  3907. /*
3908. * Select the idle load balancer from semi-idle packages only
3909. * when power-aware load balancing is enabled.
  3910. */
  3911. if (!(sched_smt_power_savings || sched_mc_power_savings))
  3912. goto out_done;
  3913. /*
  3914. * Optimize for the case when we have no idle CPUs or only one
3915. * idle CPU. Don't walk the sched_domain hierarchy in such cases.
  3916. */
  3917. if (cpumask_weight(nohz.cpu_mask) < 2)
  3918. goto out_done;
  3919. for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
  3920. ilb_group = sd->groups;
  3921. do {
  3922. if (is_semi_idle_group(ilb_group))
  3923. return cpumask_first(nohz.ilb_grp_nohz_mask);
  3924. ilb_group = ilb_group->next;
  3925. } while (ilb_group != sd->groups);
  3926. }
  3927. out_done:
  3928. return cpumask_first(nohz.cpu_mask);
  3929. }
  3930. #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
  3931. static inline int find_new_ilb(int call_cpu)
  3932. {
  3933. return cpumask_first(nohz.cpu_mask);
  3934. }
3935. #endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
  3936. /*
  3937. * This routine will try to nominate the ilb (idle load balancing)
  3938. * owner among the cpus whose ticks are stopped. ilb owner will do the idle
  3939. * load balancing on behalf of all those cpus. If all the cpus in the system
  3940. * go into this tickless mode, then there will be no ilb owner (as there is
  3941. * no need for one) and all the cpus will sleep till the next wakeup event
  3942. * arrives...
  3943. *
3944. * For the ilb owner, the tick is not stopped, and this tick will be
3945. * used for idle load balancing. The ilb owner will still be part of
3946. * nohz.cpu_mask.
3947. *
3948. * While stopping the tick, this cpu will become the ilb owner if there
3949. * is no other owner, and will remain the owner until that cpu becomes
3950. * busy or all cpus in the system stop their ticks, at which point
3951. * there is no need for an ilb owner.
  3952. *
3953. * When the ilb owner becomes busy, it nominates another owner during
3954. * the next busy scheduler_tick().
  3955. */
  3956. int select_nohz_load_balancer(int stop_tick)
  3957. {
  3958. int cpu = smp_processor_id();
  3959. if (stop_tick) {
  3960. cpu_rq(cpu)->in_nohz_recently = 1;
  3961. if (!cpu_active(cpu)) {
  3962. if (atomic_read(&nohz.load_balancer) != cpu)
  3963. return 0;
  3964. /*
  3965. * If we are going offline and still the leader,
  3966. * give up!
  3967. */
  3968. if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
  3969. BUG();
  3970. return 0;
  3971. }
  3972. cpumask_set_cpu(cpu, nohz.cpu_mask);
  3973. /* time for ilb owner also to sleep */
  3974. if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
  3975. if (atomic_read(&nohz.load_balancer) == cpu)
  3976. atomic_set(&nohz.load_balancer, -1);
  3977. return 0;
  3978. }
  3979. if (atomic_read(&nohz.load_balancer) == -1) {
  3980. /* make me the ilb owner */
  3981. if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
  3982. return 1;
  3983. } else if (atomic_read(&nohz.load_balancer) == cpu) {
  3984. int new_ilb;
  3985. if (!(sched_smt_power_savings ||
  3986. sched_mc_power_savings))
  3987. return 1;
  3988. /*
  3989. * Check to see if there is a more power-efficient
  3990. * ilb.
  3991. */
  3992. new_ilb = find_new_ilb(cpu);
  3993. if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
  3994. atomic_set(&nohz.load_balancer, -1);
  3995. resched_cpu(new_ilb);
  3996. return 0;
  3997. }
  3998. return 1;
  3999. }
  4000. } else {
  4001. if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
  4002. return 0;
  4003. cpumask_clear_cpu(cpu, nohz.cpu_mask);
  4004. if (atomic_read(&nohz.load_balancer) == cpu)
  4005. if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
  4006. BUG();
  4007. }
  4008. return 0;
  4009. }
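/*
 * Illustrative sketch, not part of the original file: the lock-free
 * election used above to nominate the ilb owner. atomic_cmpxchg() only
 * installs @cpu when the slot still holds -1, so exactly one of several
 * racing cpus wins; the losers simply observe the winner's id.
 * example_try_become_ilb is a hypothetical name.
 */
static int example_try_become_ilb(atomic_t *owner, int cpu)
{
	/* cmpxchg returns the old value; -1 means we won the race. */
	return atomic_cmpxchg(owner, -1, cpu) == -1;
}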
  4010. #endif
  4011. static DEFINE_SPINLOCK(balancing);
  4012. /*
  4013. * It checks each scheduling domain to see if it is due to be balanced,
  4014. * and initiates a balancing operation if so.
  4015. *
  4016. * Balancing parameters are set up in arch_init_sched_domains.
  4017. */
  4018. static void rebalance_domains(int cpu, enum cpu_idle_type idle)
  4019. {
  4020. int balance = 1;
  4021. struct rq *rq = cpu_rq(cpu);
  4022. unsigned long interval;
  4023. struct sched_domain *sd;
  4024. /* Earliest time when we have to do rebalance again */
  4025. unsigned long next_balance = jiffies + 60*HZ;
  4026. int update_next_balance = 0;
  4027. int need_serialize;
  4028. for_each_domain(cpu, sd) {
  4029. if (!(sd->flags & SD_LOAD_BALANCE))
  4030. continue;
  4031. interval = sd->balance_interval;
  4032. if (idle != CPU_IDLE)
  4033. interval *= sd->busy_factor;
  4034. /* scale ms to jiffies */
  4035. interval = msecs_to_jiffies(interval);
  4036. if (unlikely(!interval))
  4037. interval = 1;
  4038. if (interval > HZ*NR_CPUS/10)
  4039. interval = HZ*NR_CPUS/10;
  4040. need_serialize = sd->flags & SD_SERIALIZE;
  4041. if (need_serialize) {
  4042. if (!spin_trylock(&balancing))
  4043. goto out;
  4044. }
  4045. if (time_after_eq(jiffies, sd->last_balance + interval)) {
  4046. if (load_balance(cpu, rq, sd, idle, &balance)) {
  4047. /*
  4048. * We've pulled tasks over so either we're no
  4049. * longer idle, or one of our SMT siblings is
  4050. * not idle.
  4051. */
  4052. idle = CPU_NOT_IDLE;
  4053. }
  4054. sd->last_balance = jiffies;
  4055. }
  4056. if (need_serialize)
  4057. spin_unlock(&balancing);
  4058. out:
  4059. if (time_after(next_balance, sd->last_balance + interval)) {
  4060. next_balance = sd->last_balance + interval;
  4061. update_next_balance = 1;
  4062. }
  4063. /*
  4064. * Stop the load balance at this level. There is another
  4065. * CPU in our sched group which is doing load balancing more
  4066. * actively.
  4067. */
  4068. if (!balance)
  4069. break;
  4070. }
  4071. /*
  4072. * next_balance will be updated only when there is a need.
4073. * For example, when the cpu is attached to the null domain it will
4074. * not be updated.
  4075. */
  4076. if (likely(update_next_balance))
  4077. rq->next_balance = next_balance;
  4078. }
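/*
 * Illustrative sketch, not part of the original file: the effective
 * interval computed at the top of the loop in rebalance_domains(). A busy
 * cpu stretches the interval by busy_factor; the result is converted from
 * milliseconds to jiffies and clamped to [1, HZ*NR_CPUS/10].
 * example_effective_interval is a hypothetical name.
 */
static unsigned long example_effective_interval(unsigned long interval_ms,
						unsigned int busy_factor,
						int cpu_is_busy)
{
	unsigned long interval = interval_ms;

	/* Busy cpus rebalance less frequently. */
	if (cpu_is_busy)
		interval *= busy_factor;

	/* Scale ms to jiffies and clamp to a sane range. */
	interval = msecs_to_jiffies(interval);
	if (unlikely(!interval))
		interval = 1;
	if (interval > HZ*NR_CPUS/10)
		interval = HZ*NR_CPUS/10;

	return interval;
}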
  4079. /*
  4080. * run_rebalance_domains is triggered when needed from the scheduler tick.
  4081. * In CONFIG_NO_HZ case, the idle load balance owner will do the
  4082. * rebalancing for all the cpus for whom scheduler ticks are stopped.
  4083. */
  4084. static void run_rebalance_domains(struct softirq_action *h)
  4085. {
  4086. int this_cpu = smp_processor_id();
  4087. struct rq *this_rq = cpu_rq(this_cpu);
  4088. enum cpu_idle_type idle = this_rq->idle_at_tick ?
  4089. CPU_IDLE : CPU_NOT_IDLE;
  4090. rebalance_domains(this_cpu, idle);
  4091. #ifdef CONFIG_NO_HZ
  4092. /*
  4093. * If this cpu is the owner for idle load balancing, then do the
  4094. * balancing on behalf of the other idle cpus whose ticks are
  4095. * stopped.
  4096. */
  4097. if (this_rq->idle_at_tick &&
  4098. atomic_read(&nohz.load_balancer) == this_cpu) {
  4099. struct rq *rq;
  4100. int balance_cpu;
  4101. for_each_cpu(balance_cpu, nohz.cpu_mask) {
  4102. if (balance_cpu == this_cpu)
  4103. continue;
  4104. /*
  4105. * If this cpu gets work to do, stop the load balancing
4106. * work being done for other cpus. The next load
4107. * balancing owner will pick it up.
  4108. */
  4109. if (need_resched())
  4110. break;
  4111. rebalance_domains(balance_cpu, CPU_IDLE);
  4112. rq = cpu_rq(balance_cpu);
  4113. if (time_after(this_rq->next_balance, rq->next_balance))
  4114. this_rq->next_balance = rq->next_balance;
  4115. }
  4116. }
  4117. #endif
  4118. }
  4119. static inline int on_null_domain(int cpu)
  4120. {
  4121. return !rcu_dereference(cpu_rq(cpu)->sd);
  4122. }
  4123. /*
  4124. * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  4125. *
  4126. * In case of CONFIG_NO_HZ, this is the place where we nominate a new
  4127. * idle load balancing owner or decide to stop the periodic load balancing,
  4128. * if the whole system is idle.
  4129. */
  4130. static inline void trigger_load_balance(struct rq *rq, int cpu)
  4131. {
  4132. #ifdef CONFIG_NO_HZ
  4133. /*
  4134. * If we were in the nohz mode recently and busy at the current
4135. * scheduler tick, then check if we need to nominate a new idle
  4136. * load balancer.
  4137. */
  4138. if (rq->in_nohz_recently && !rq->idle_at_tick) {
  4139. rq->in_nohz_recently = 0;
  4140. if (atomic_read(&nohz.load_balancer) == cpu) {
  4141. cpumask_clear_cpu(cpu, nohz.cpu_mask);
  4142. atomic_set(&nohz.load_balancer, -1);
  4143. }
  4144. if (atomic_read(&nohz.load_balancer) == -1) {
  4145. int ilb = find_new_ilb(cpu);
  4146. if (ilb < nr_cpu_ids)
  4147. resched_cpu(ilb);
  4148. }
  4149. }
  4150. /*
  4151. * If this cpu is idle and doing idle load balancing for all the
  4152. * cpus with ticks stopped, is it time for that to stop?
  4153. */
  4154. if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
  4155. cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
  4156. resched_cpu(cpu);
  4157. return;
  4158. }
  4159. /*
  4160. * If this cpu is idle and the idle load balancing is done by
4161. * someone else, then there is no need to raise the SCHED_SOFTIRQ.
  4162. */
  4163. if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
  4164. cpumask_test_cpu(cpu, nohz.cpu_mask))
  4165. return;
  4166. #endif
  4167. /* Don't need to rebalance while attached to NULL domain */
  4168. if (time_after_eq(jiffies, rq->next_balance) &&
  4169. likely(!on_null_domain(cpu)))
  4170. raise_softirq(SCHED_SOFTIRQ);
  4171. }
  4172. #else /* CONFIG_SMP */
  4173. /*
  4174. * on UP we do not need to balance between CPUs:
  4175. */
  4176. static inline void idle_balance(int cpu, struct rq *rq)
  4177. {
  4178. }
  4179. #endif
  4180. DEFINE_PER_CPU(struct kernel_stat, kstat);
  4181. EXPORT_PER_CPU_SYMBOL(kstat);
  4182. /*
  4183. * Return any ns on the sched_clock that have not yet been accounted in
  4184. * @p in case that task is currently running.
  4185. *
  4186. * Called with task_rq_lock() held on @rq.
  4187. */
  4188. static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
  4189. {
  4190. u64 ns = 0;
  4191. if (task_current(rq, p)) {
  4192. update_rq_clock(rq);
  4193. ns = rq->clock - p->se.exec_start;
  4194. if ((s64)ns < 0)
  4195. ns = 0;
  4196. }
  4197. return ns;
  4198. }
  4199. unsigned long long task_delta_exec(struct task_struct *p)
  4200. {
  4201. unsigned long flags;
  4202. struct rq *rq;
  4203. u64 ns = 0;
  4204. rq = task_rq_lock(p, &flags);
  4205. ns = do_task_delta_exec(p, rq);
  4206. task_rq_unlock(rq, &flags);
  4207. return ns;
  4208. }
  4209. /*
  4210. * Return accounted runtime for the task.
  4211. * In case the task is currently running, return the runtime plus current's
  4212. * pending runtime that have not been accounted yet.
  4213. */
  4214. unsigned long long task_sched_runtime(struct task_struct *p)
  4215. {
  4216. unsigned long flags;
  4217. struct rq *rq;
  4218. u64 ns = 0;
  4219. rq = task_rq_lock(p, &flags);
  4220. ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
  4221. task_rq_unlock(rq, &flags);
  4222. return ns;
  4223. }
  4224. /*
  4225. * Return sum_exec_runtime for the thread group.
  4226. * In case the task is currently running, return the sum plus current's
  4227. * pending runtime that have not been accounted yet.
  4228. *
  4229. * Note that the thread group might have other running tasks as well,
4230. * so the return value does not include the pending runtime that other
4231. * running tasks might have.
  4232. */
  4233. unsigned long long thread_group_sched_runtime(struct task_struct *p)
  4234. {
  4235. struct task_cputime totals;
  4236. unsigned long flags;
  4237. struct rq *rq;
  4238. u64 ns;
  4239. rq = task_rq_lock(p, &flags);
  4240. thread_group_cputime(p, &totals);
  4241. ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
  4242. task_rq_unlock(rq, &flags);
  4243. return ns;
  4244. }
  4245. /*
  4246. * Account user cpu time to a process.
  4247. * @p: the process that the cpu time gets accounted to
  4248. * @cputime: the cpu time spent in user space since the last update
  4249. * @cputime_scaled: cputime scaled by cpu frequency
  4250. */
  4251. void account_user_time(struct task_struct *p, cputime_t cputime,
  4252. cputime_t cputime_scaled)
  4253. {
  4254. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  4255. cputime64_t tmp;
  4256. /* Add user time to process. */
  4257. p->utime = cputime_add(p->utime, cputime);
  4258. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  4259. account_group_user_time(p, cputime);
  4260. /* Add user time to cpustat. */
  4261. tmp = cputime_to_cputime64(cputime);
  4262. if (TASK_NICE(p) > 0)
  4263. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  4264. else
  4265. cpustat->user = cputime64_add(cpustat->user, tmp);
  4266. cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
  4267. /* Account for user time used */
  4268. acct_update_integrals(p);
  4269. }
  4270. /*
  4271. * Account guest cpu time to a process.
  4272. * @p: the process that the cpu time gets accounted to
  4273. * @cputime: the cpu time spent in virtual machine since the last update
  4274. * @cputime_scaled: cputime scaled by cpu frequency
  4275. */
  4276. static void account_guest_time(struct task_struct *p, cputime_t cputime,
  4277. cputime_t cputime_scaled)
  4278. {
  4279. cputime64_t tmp;
  4280. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  4281. tmp = cputime_to_cputime64(cputime);
  4282. /* Add guest time to process. */
  4283. p->utime = cputime_add(p->utime, cputime);
  4284. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  4285. account_group_user_time(p, cputime);
  4286. p->gtime = cputime_add(p->gtime, cputime);
  4287. /* Add guest time to cpustat. */
  4288. cpustat->user = cputime64_add(cpustat->user, tmp);
  4289. cpustat->guest = cputime64_add(cpustat->guest, tmp);
  4290. }
  4291. /*
  4292. * Account system cpu time to a process.
  4293. * @p: the process that the cpu time gets accounted to
  4294. * @hardirq_offset: the offset to subtract from hardirq_count()
  4295. * @cputime: the cpu time spent in kernel space since the last update
  4296. * @cputime_scaled: cputime scaled by cpu frequency
  4297. */
  4298. void account_system_time(struct task_struct *p, int hardirq_offset,
  4299. cputime_t cputime, cputime_t cputime_scaled)
  4300. {
  4301. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  4302. cputime64_t tmp;
  4303. if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  4304. account_guest_time(p, cputime, cputime_scaled);
  4305. return;
  4306. }
  4307. /* Add system time to process. */
  4308. p->stime = cputime_add(p->stime, cputime);
  4309. p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
  4310. account_group_system_time(p, cputime);
  4311. /* Add system time to cpustat. */
  4312. tmp = cputime_to_cputime64(cputime);
  4313. if (hardirq_count() - hardirq_offset)
  4314. cpustat->irq = cputime64_add(cpustat->irq, tmp);
  4315. else if (softirq_count())
  4316. cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
  4317. else
  4318. cpustat->system = cputime64_add(cpustat->system, tmp);
  4319. cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
  4320. /* Account for system time used */
  4321. acct_update_integrals(p);
  4322. }
  4323. /*
  4324. * Account for involuntary wait time.
4325. * @cputime: the cpu time spent in involuntary wait
  4326. */
  4327. void account_steal_time(cputime_t cputime)
  4328. {
  4329. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  4330. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  4331. cpustat->steal = cputime64_add(cpustat->steal, cputime64);
  4332. }
  4333. /*
  4334. * Account for idle time.
  4335. * @cputime: the cpu time spent in idle wait
  4336. */
  4337. void account_idle_time(cputime_t cputime)
  4338. {
  4339. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  4340. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  4341. struct rq *rq = this_rq();
  4342. if (atomic_read(&rq->nr_iowait) > 0)
  4343. cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
  4344. else
  4345. cpustat->idle = cputime64_add(cpustat->idle, cputime64);
  4346. }
  4347. #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  4348. /*
  4349. * Account a single tick of cpu time.
  4350. * @p: the process that the cpu time gets accounted to
  4351. * @user_tick: indicates if the tick is a user or a system tick
  4352. */
  4353. void account_process_tick(struct task_struct *p, int user_tick)
  4354. {
  4355. cputime_t one_jiffy = jiffies_to_cputime(1);
  4356. cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
  4357. struct rq *rq = this_rq();
  4358. if (user_tick)
  4359. account_user_time(p, one_jiffy, one_jiffy_scaled);
  4360. else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
  4361. account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
  4362. one_jiffy_scaled);
  4363. else
  4364. account_idle_time(one_jiffy);
  4365. }
  4366. /*
  4367. * Account multiple ticks of steal time.
4369. * @ticks: number of stolen ticks
  4370. */
  4371. void account_steal_ticks(unsigned long ticks)
  4372. {
  4373. account_steal_time(jiffies_to_cputime(ticks));
  4374. }
  4375. /*
  4376. * Account multiple ticks of idle time.
4377. * @ticks: number of idle ticks
  4378. */
  4379. void account_idle_ticks(unsigned long ticks)
  4380. {
  4381. account_idle_time(jiffies_to_cputime(ticks));
  4382. }
  4383. #endif
  4384. /*
  4385. * Use precise platform statistics if available:
  4386. */
  4387. #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  4388. cputime_t task_utime(struct task_struct *p)
  4389. {
  4390. return p->utime;
  4391. }
  4392. cputime_t task_stime(struct task_struct *p)
  4393. {
  4394. return p->stime;
  4395. }
  4396. #else
  4397. cputime_t task_utime(struct task_struct *p)
  4398. {
  4399. clock_t utime = cputime_to_clock_t(p->utime),
  4400. total = utime + cputime_to_clock_t(p->stime);
  4401. u64 temp;
  4402. /*
  4403. * Use CFS's precise accounting:
  4404. */
  4405. temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
  4406. if (total) {
  4407. temp *= utime;
  4408. do_div(temp, total);
  4409. }
  4410. utime = (clock_t)temp;
  4411. p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
  4412. return p->prev_utime;
  4413. }
  4414. cputime_t task_stime(struct task_struct *p)
  4415. {
  4416. clock_t stime;
  4417. /*
  4418. * Use CFS's precise accounting. (we subtract utime from
  4419. * the total, to make sure the total observed by userspace
  4420. * grows monotonically - apps rely on that):
  4421. */
  4422. stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
  4423. cputime_to_clock_t(task_utime(p));
  4424. if (stime >= 0)
  4425. p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
  4426. return p->prev_stime;
  4427. }
  4428. #endif
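/*
 * Illustrative worked example, not part of the original file: the scaling
 * performed by task_utime() above splits the precise CFS runtime between
 * user and system in the same ratio as the tick-sampled utime/stime.
 * E.g. with sum_exec = 1000 clock ticks, utime = 30 and stime = 10 sampled
 * ticks, the scaled utime is 1000 * 30 / 40 = 750.
 * example_scale_utime is a hypothetical name.
 */
static u64 example_scale_utime(u64 sum_exec, u32 utime, u32 stime)
{
	u32 total = utime + stime;
	u64 temp = sum_exec;

	if (total) {
		temp *= utime;
		/* do_div() divides the u64 in place by a 32-bit divisor. */
		do_div(temp, total);
	}
	return temp;
}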
  4429. inline cputime_t task_gtime(struct task_struct *p)
  4430. {
  4431. return p->gtime;
  4432. }
  4433. /*
  4434. * This function gets called by the timer code, with HZ frequency.
  4435. * We call it with interrupts disabled.
  4436. *
  4437. * It also gets called by the fork code, when changing the parent's
  4438. * timeslices.
  4439. */
  4440. void scheduler_tick(void)
  4441. {
  4442. int cpu = smp_processor_id();
  4443. struct rq *rq = cpu_rq(cpu);
  4444. struct task_struct *curr = rq->curr;
  4445. sched_clock_tick();
  4446. spin_lock(&rq->lock);
  4447. update_rq_clock(rq);
  4448. update_cpu_load(rq);
  4449. curr->sched_class->task_tick(rq, curr, 0);
  4450. spin_unlock(&rq->lock);
  4451. perf_counter_task_tick(curr, cpu);
  4452. #ifdef CONFIG_SMP
  4453. rq->idle_at_tick = idle_cpu(cpu);
  4454. trigger_load_balance(rq, cpu);
  4455. #endif
  4456. }
  4457. notrace unsigned long get_parent_ip(unsigned long addr)
  4458. {
  4459. if (in_lock_functions(addr)) {
  4460. addr = CALLER_ADDR2;
  4461. if (in_lock_functions(addr))
  4462. addr = CALLER_ADDR3;
  4463. }
  4464. return addr;
  4465. }
  4466. #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
  4467. defined(CONFIG_PREEMPT_TRACER))
  4468. void __kprobes add_preempt_count(int val)
  4469. {
  4470. #ifdef CONFIG_DEBUG_PREEMPT
  4471. /*
  4472. * Underflow?
  4473. */
  4474. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  4475. return;
  4476. #endif
  4477. preempt_count() += val;
  4478. #ifdef CONFIG_DEBUG_PREEMPT
  4479. /*
  4480. * Spinlock count overflowing soon?
  4481. */
  4482. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
  4483. PREEMPT_MASK - 10);
  4484. #endif
  4485. if (preempt_count() == val)
  4486. trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  4487. }
  4488. EXPORT_SYMBOL(add_preempt_count);
  4489. void __kprobes sub_preempt_count(int val)
  4490. {
  4491. #ifdef CONFIG_DEBUG_PREEMPT
  4492. /*
  4493. * Underflow?
  4494. */
  4495. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  4496. return;
  4497. /*
  4498. * Is the spinlock portion underflowing?
  4499. */
  4500. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  4501. !(preempt_count() & PREEMPT_MASK)))
  4502. return;
  4503. #endif
  4504. if (preempt_count() == val)
  4505. trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  4506. preempt_count() -= val;
  4507. }
  4508. EXPORT_SYMBOL(sub_preempt_count);
  4509. #endif
  4510. /*
  4511. * Print scheduling while atomic bug:
  4512. */
  4513. static noinline void __schedule_bug(struct task_struct *prev)
  4514. {
  4515. struct pt_regs *regs = get_irq_regs();
  4516. printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
  4517. prev->comm, prev->pid, preempt_count());
  4518. debug_show_held_locks(prev);
  4519. print_modules();
  4520. if (irqs_disabled())
  4521. print_irqtrace_events(prev);
  4522. if (regs)
  4523. show_regs(regs);
  4524. else
  4525. dump_stack();
  4526. }
  4527. /*
  4528. * Various schedule()-time debugging checks and statistics:
  4529. */
  4530. static inline void schedule_debug(struct task_struct *prev)
  4531. {
  4532. /*
  4533. * Test if we are atomic. Since do_exit() needs to call into
  4534. * schedule() atomically, we ignore that path for now.
  4535. * Otherwise, whine if we are scheduling when we should not be.
  4536. */
  4537. if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
  4538. __schedule_bug(prev);
  4539. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  4540. schedstat_inc(this_rq(), sched_count);
  4541. #ifdef CONFIG_SCHEDSTATS
  4542. if (unlikely(prev->lock_depth >= 0)) {
  4543. schedstat_inc(this_rq(), bkl_count);
  4544. schedstat_inc(prev, sched_info.bkl_count);
  4545. }
  4546. #endif
  4547. }
  4548. static void put_prev_task(struct rq *rq, struct task_struct *prev)
  4549. {
  4550. if (prev->state == TASK_RUNNING) {
  4551. u64 runtime = prev->se.sum_exec_runtime;
  4552. runtime -= prev->se.prev_sum_exec_runtime;
  4553. runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
  4554. /*
  4555. * In order to avoid avg_overlap growing stale when we are
  4556. * indeed overlapping and hence not getting put to sleep, grow
  4557. * the avg_overlap on preemption.
  4558. *
  4559. * We use the average preemption runtime because that
  4560. * correlates to the amount of cache footprint a task can
  4561. * build up.
  4562. */
  4563. update_avg(&prev->se.avg_overlap, runtime);
  4564. }
  4565. prev->sched_class->put_prev_task(rq, prev);
  4566. }
  4567. /*
  4568. * Pick up the highest-prio task:
  4569. */
  4570. static inline struct task_struct *
  4571. pick_next_task(struct rq *rq)
  4572. {
  4573. const struct sched_class *class;
  4574. struct task_struct *p;
  4575. /*
  4576. * Optimization: we know that if all tasks are in
  4577. * the fair class we can call that function directly:
  4578. */
  4579. if (likely(rq->nr_running == rq->cfs.nr_running)) {
  4580. p = fair_sched_class.pick_next_task(rq);
  4581. if (likely(p))
  4582. return p;
  4583. }
  4584. class = sched_class_highest;
  4585. for ( ; ; ) {
  4586. p = class->pick_next_task(rq);
  4587. if (p)
  4588. return p;
  4589. /*
  4590. * Will never be NULL as the idle class always
  4591. * returns a non-NULL p:
  4592. */
  4593. class = class->next;
  4594. }
  4595. }
  4596. /*
  4597. * schedule() is the main scheduler function.
  4598. */
  4599. asmlinkage void __sched schedule(void)
  4600. {
  4601. struct task_struct *prev, *next;
  4602. unsigned long *switch_count;
  4603. struct rq *rq;
  4604. int cpu;
  4605. need_resched:
  4606. preempt_disable();
  4607. cpu = smp_processor_id();
  4608. rq = cpu_rq(cpu);
  4609. rcu_qsctr_inc(cpu);
  4610. prev = rq->curr;
  4611. switch_count = &prev->nivcsw;
  4612. release_kernel_lock(prev);
  4613. need_resched_nonpreemptible:
  4614. schedule_debug(prev);
  4615. if (sched_feat(HRTICK))
  4616. hrtick_clear(rq);
  4617. spin_lock_irq(&rq->lock);
  4618. update_rq_clock(rq);
  4619. clear_tsk_need_resched(prev);
  4620. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  4621. if (unlikely(signal_pending_state(prev->state, prev)))
  4622. prev->state = TASK_RUNNING;
  4623. else
  4624. deactivate_task(rq, prev, 1);
  4625. switch_count = &prev->nvcsw;
  4626. }
  4627. pre_schedule(rq, prev);
  4628. if (unlikely(!rq->nr_running))
  4629. idle_balance(cpu, rq);
  4630. put_prev_task(rq, prev);
  4631. next = pick_next_task(rq);
  4632. if (likely(prev != next)) {
  4633. sched_info_switch(prev, next);
  4634. perf_counter_task_sched_out(prev, next, cpu);
  4635. rq->nr_switches++;
  4636. rq->curr = next;
  4637. ++*switch_count;
  4638. context_switch(rq, prev, next); /* unlocks the rq */
  4639. /*
  4640. * the context switch might have flipped the stack from under
  4641. * us, hence refresh the local variables.
  4642. */
  4643. cpu = smp_processor_id();
  4644. rq = cpu_rq(cpu);
  4645. } else
  4646. spin_unlock_irq(&rq->lock);
  4647. post_schedule(rq);
  4648. if (unlikely(reacquire_kernel_lock(current) < 0))
  4649. goto need_resched_nonpreemptible;
  4650. preempt_enable_no_resched();
  4651. if (need_resched())
  4652. goto need_resched;
  4653. }
  4654. EXPORT_SYMBOL(schedule);
  4655. #ifdef CONFIG_SMP
  4656. /*
  4657. * Look out! "owner" is an entirely speculative pointer
  4658. * access and not reliable.
  4659. */
  4660. int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  4661. {
  4662. unsigned int cpu;
  4663. struct rq *rq;
  4664. if (!sched_feat(OWNER_SPIN))
  4665. return 0;
  4666. #ifdef CONFIG_DEBUG_PAGEALLOC
  4667. /*
  4668. * Need to access the cpu field knowing that
  4669. * DEBUG_PAGEALLOC could have unmapped it if
  4670. * the mutex owner just released it and exited.
  4671. */
  4672. if (probe_kernel_address(&owner->cpu, cpu))
  4673. goto out;
  4674. #else
  4675. cpu = owner->cpu;
  4676. #endif
  4677. /*
  4678. * Even if the access succeeded (likely case),
  4679. * the cpu field may no longer be valid.
  4680. */
  4681. if (cpu >= nr_cpumask_bits)
  4682. goto out;
  4683. /*
  4684. * We need to validate that we can do a
  4685. * get_cpu() and that we have the percpu area.
  4686. */
  4687. if (!cpu_online(cpu))
  4688. goto out;
  4689. rq = cpu_rq(cpu);
  4690. for (;;) {
  4691. /*
  4692. * Owner changed, break to re-assess state.
  4693. */
  4694. if (lock->owner != owner)
  4695. break;
  4696. /*
  4697. * Is that owner really running on that cpu?
  4698. */
  4699. if (task_thread_info(rq->curr) != owner || need_resched())
  4700. return 0;
  4701. cpu_relax();
  4702. }
  4703. out:
  4704. return 1;
  4705. }
  4706. #endif
  4707. #ifdef CONFIG_PREEMPT
  4708. /*
4709. * This is the entry point to schedule() from in-kernel preemption
4710. * off of preempt_enable. Preemption on return from interrupt is
4711. * handled separately (see preempt_schedule_irq() below).
  4712. */
  4713. asmlinkage void __sched preempt_schedule(void)
  4714. {
  4715. struct thread_info *ti = current_thread_info();
  4716. /*
  4717. * If there is a non-zero preempt_count or interrupts are disabled,
  4718. * we do not want to preempt the current task. Just return..
  4719. */
  4720. if (likely(ti->preempt_count || irqs_disabled()))
  4721. return;
  4722. do {
  4723. add_preempt_count(PREEMPT_ACTIVE);
  4724. schedule();
  4725. sub_preempt_count(PREEMPT_ACTIVE);
  4726. /*
  4727. * Check again in case we missed a preemption opportunity
  4728. * between schedule and now.
  4729. */
  4730. barrier();
  4731. } while (need_resched());
  4732. }
  4733. EXPORT_SYMBOL(preempt_schedule);
  4734. /*
4735. * This is the entry point to schedule() from kernel preemption
4736. * off of irq context.
4737. * Note that this is called and returns with irqs disabled. This
4738. * protects us against recursive calls from irq context.
  4739. */
  4740. asmlinkage void __sched preempt_schedule_irq(void)
  4741. {
  4742. struct thread_info *ti = current_thread_info();
  4743. /* Catch callers which need to be fixed */
  4744. BUG_ON(ti->preempt_count || !irqs_disabled());
  4745. do {
  4746. add_preempt_count(PREEMPT_ACTIVE);
  4747. local_irq_enable();
  4748. schedule();
  4749. local_irq_disable();
  4750. sub_preempt_count(PREEMPT_ACTIVE);
  4751. /*
  4752. * Check again in case we missed a preemption opportunity
  4753. * between schedule and now.
  4754. */
  4755. barrier();
  4756. } while (need_resched());
  4757. }
  4758. #endif /* CONFIG_PREEMPT */
  4759. int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
  4760. void *key)
  4761. {
  4762. return try_to_wake_up(curr->private, mode, sync);
  4763. }
  4764. EXPORT_SYMBOL(default_wake_function);
  4765. /*
  4766. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  4767. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  4768. * number) then we wake all the non-exclusive tasks and one exclusive task.
  4769. *
  4770. * There are circumstances in which we can try to wake a task which has already
  4771. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  4772. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  4773. */
  4774. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  4775. int nr_exclusive, int sync, void *key)
  4776. {
  4777. wait_queue_t *curr, *next;
  4778. list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
  4779. unsigned flags = curr->flags;
  4780. if (curr->func(curr, mode, sync, key) &&
  4781. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  4782. break;
  4783. }
  4784. }
  4785. /**
  4786. * __wake_up - wake up threads blocked on a waitqueue.
  4787. * @q: the waitqueue
  4788. * @mode: which threads
  4789. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  4790. * @key: is directly passed to the wakeup function
  4791. *
  4792. * It may be assumed that this function implies a write memory barrier before
  4793. * changing the task state if and only if any tasks are woken up.
  4794. */
  4795. void __wake_up(wait_queue_head_t *q, unsigned int mode,
  4796. int nr_exclusive, void *key)
  4797. {
  4798. unsigned long flags;
  4799. spin_lock_irqsave(&q->lock, flags);
  4800. __wake_up_common(q, mode, nr_exclusive, 0, key);
  4801. spin_unlock_irqrestore(&q->lock, flags);
  4802. }
  4803. EXPORT_SYMBOL(__wake_up);
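/*
 * Illustrative usage sketch, not part of the original file: the classic
 * producer/consumer pairing built on the waitqueue primitives above. One
 * side sleeps until a condition becomes true; the other sets the condition
 * and wakes the queue (wake_up() expands to __wake_up() with TASK_NORMAL).
 * example_wq and example_ready are hypothetical names.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;

static int example_consumer(void)
{
	/* Sleeps interruptibly until example_ready becomes non-zero. */
	return wait_event_interruptible(example_wq, example_ready);
}

static void example_producer(void)
{
	example_ready = 1;
	/* Wake all non-exclusive waiters (and one exclusive waiter). */
	wake_up(&example_wq);
}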
  4804. /*
  4805. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  4806. */
  4807. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  4808. {
  4809. __wake_up_common(q, mode, 1, 0, NULL);
  4810. }
  4811. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  4812. {
  4813. __wake_up_common(q, mode, 1, 0, key);
  4814. }
  4815. /**
  4816. * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  4817. * @q: the waitqueue
  4818. * @mode: which threads
  4819. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  4820. * @key: opaque value to be passed to wakeup targets
  4821. *
4822. * The sync wakeup differs in that the waker knows that it will schedule
4823. * away soon, so while the target thread will be woken up, it will not
4824. * be migrated to another CPU - i.e. the two threads are 'synchronized'
  4825. * with each other. This can prevent needless bouncing between CPUs.
  4826. *
  4827. * On UP it can prevent extra preemption.
  4828. *
  4829. * It may be assumed that this function implies a write memory barrier before
  4830. * changing the task state if and only if any tasks are woken up.
  4831. */
  4832. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
  4833. int nr_exclusive, void *key)
  4834. {
  4835. unsigned long flags;
  4836. int sync = 1;
  4837. if (unlikely(!q))
  4838. return;
  4839. if (unlikely(!nr_exclusive))
  4840. sync = 0;
  4841. spin_lock_irqsave(&q->lock, flags);
  4842. __wake_up_common(q, mode, nr_exclusive, sync, key);
  4843. spin_unlock_irqrestore(&q->lock, flags);
  4844. }
  4845. EXPORT_SYMBOL_GPL(__wake_up_sync_key);
  4846. /*
  4847. * __wake_up_sync - see __wake_up_sync_key()
  4848. */
  4849. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  4850. {
  4851. __wake_up_sync_key(q, mode, nr_exclusive, NULL);
  4852. }
  4853. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  4854. /**
  4855. * complete: - signals a single thread waiting on this completion
  4856. * @x: holds the state of this particular completion
  4857. *
  4858. * This will wake up a single thread waiting on this completion. Threads will be
  4859. * awakened in the same order in which they were queued.
  4860. *
  4861. * See also complete_all(), wait_for_completion() and related routines.
  4862. *
  4863. * It may be assumed that this function implies a write memory barrier before
  4864. * changing the task state if and only if any tasks are woken up.
  4865. */
  4866. void complete(struct completion *x)
  4867. {
  4868. unsigned long flags;
  4869. spin_lock_irqsave(&x->wait.lock, flags);
  4870. x->done++;
  4871. __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
  4872. spin_unlock_irqrestore(&x->wait.lock, flags);
  4873. }
  4874. EXPORT_SYMBOL(complete);
  4875. /**
  4876. * complete_all: - signals all threads waiting on this completion
  4877. * @x: holds the state of this particular completion
  4878. *
  4879. * This will wake up all threads waiting on this particular completion event.
  4880. *
  4881. * It may be assumed that this function implies a write memory barrier before
  4882. * changing the task state if and only if any tasks are woken up.
  4883. */
  4884. void complete_all(struct completion *x)
  4885. {
  4886. unsigned long flags;
  4887. spin_lock_irqsave(&x->wait.lock, flags);
  4888. x->done += UINT_MAX/2;
  4889. __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
  4890. spin_unlock_irqrestore(&x->wait.lock, flags);
  4891. }
  4892. EXPORT_SYMBOL(complete_all);
  4893. static inline long __sched
  4894. do_wait_for_common(struct completion *x, long timeout, int state)
  4895. {
  4896. if (!x->done) {
  4897. DECLARE_WAITQUEUE(wait, current);
  4898. wait.flags |= WQ_FLAG_EXCLUSIVE;
  4899. __add_wait_queue_tail(&x->wait, &wait);
  4900. do {
  4901. if (signal_pending_state(state, current)) {
  4902. timeout = -ERESTARTSYS;
  4903. break;
  4904. }
  4905. __set_current_state(state);
  4906. spin_unlock_irq(&x->wait.lock);
  4907. timeout = schedule_timeout(timeout);
  4908. spin_lock_irq(&x->wait.lock);
  4909. } while (!x->done && timeout);
  4910. __remove_wait_queue(&x->wait, &wait);
  4911. if (!x->done)
  4912. return timeout;
  4913. }
  4914. x->done--;
  4915. return timeout ?: 1;
  4916. }
  4917. static long __sched
  4918. wait_for_common(struct completion *x, long timeout, int state)
  4919. {
  4920. might_sleep();
  4921. spin_lock_irq(&x->wait.lock);
  4922. timeout = do_wait_for_common(x, timeout, state);
  4923. spin_unlock_irq(&x->wait.lock);
  4924. return timeout;
  4925. }
  4926. /**
  4927. * wait_for_completion: - waits for completion of a task
  4928. * @x: holds the state of this particular completion
  4929. *
  4930. * This waits to be signaled for completion of a specific task. It is NOT
  4931. * interruptible and there is no timeout.
  4932. *
  4933. * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
  4934. * and interrupt capability. Also see complete().
  4935. */
  4936. void __sched wait_for_completion(struct completion *x)
  4937. {
  4938. wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
  4939. }
  4940. EXPORT_SYMBOL(wait_for_completion);
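/*
 * Illustrative usage sketch, not part of the original file: pairing
 * complete() with wait_for_completion() for a "wait until another context
 * has finished" handshake. In real code example_async_side() would run
 * from another context, e.g. a workqueue or interrupt handler.
 * The example_* names are hypothetical.
 */
static void example_async_side(struct completion *done)
{
	/* ... perform the deferred work, then signal exactly one waiter. */
	complete(done);
}

static void example_sync_side(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	example_async_side(&done);
	/* Uninterruptible wait, no timeout; returns once done was signaled. */
	wait_for_completion(&done);
}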
  4941. /**
  4942. * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  4943. * @x: holds the state of this particular completion
  4944. * @timeout: timeout value in jiffies
  4945. *
  4946. * This waits for either a completion of a specific task to be signaled or for a
  4947. * specified timeout to expire. The timeout is in jiffies. It is not
  4948. * interruptible.
  4949. */
  4950. unsigned long __sched
  4951. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  4952. {
  4953. return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
  4954. }
  4955. EXPORT_SYMBOL(wait_for_completion_timeout);
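/*
 * Illustrative sketch, not part of the original file, of the return
 * convention above: wait_for_completion_timeout() yields 0 when the
 * timeout expired and otherwise the number of jiffies remaining when the
 * completion fired. example_wait_up_to_100ms is a hypothetical name.
 */
static int example_wait_up_to_100ms(struct completion *x)
{
	unsigned long left;

	left = wait_for_completion_timeout(x, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* nobody called complete() in time */
	return 0;			/* completed with jiffies to spare */
}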
  4956. /**
  4957. * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  4958. * @x: holds the state of this particular completion
  4959. *
  4960. * This waits for completion of a specific task to be signaled. It is
  4961. * interruptible.
  4962. */
  4963. int __sched wait_for_completion_interruptible(struct completion *x)
  4964. {
  4965. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
  4966. if (t == -ERESTARTSYS)
  4967. return t;
  4968. return 0;
  4969. }
  4970. EXPORT_SYMBOL(wait_for_completion_interruptible);
  4971. /**
  4972. * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
  4973. * @x: holds the state of this particular completion
  4974. * @timeout: timeout value in jiffies
  4975. *
  4976. * This waits for either a completion of a specific task to be signaled or for a
  4977. * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  4978. */
  4979. unsigned long __sched
  4980. wait_for_completion_interruptible_timeout(struct completion *x,
  4981. unsigned long timeout)
  4982. {
  4983. return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
  4984. }
  4985. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  4986. /**
  4987. * wait_for_completion_killable: - waits for completion of a task (killable)
  4988. * @x: holds the state of this particular completion
  4989. *
  4990. * This waits to be signaled for completion of a specific task. It can be
  4991. * interrupted by a kill signal.
  4992. */
  4993. int __sched wait_for_completion_killable(struct completion *x)
  4994. {
  4995. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
  4996. if (t == -ERESTARTSYS)
  4997. return t;
  4998. return 0;
  4999. }
  5000. EXPORT_SYMBOL(wait_for_completion_killable);
  5001. /**
  5002. * try_wait_for_completion - try to decrement a completion without blocking
  5003. * @x: completion structure
  5004. *
  5005. * Returns: 0 if a decrement cannot be done without blocking
  5006. * 1 if a decrement succeeded.
  5007. *
  5008. * If a completion is being used as a counting completion,
  5009. * attempt to decrement the counter without blocking. This
  5010. * enables us to avoid waiting if the resource the completion
  5011. * is protecting is not available.
  5012. */
  5013. bool try_wait_for_completion(struct completion *x)
  5014. {
  5015. int ret = 1;
  5016. spin_lock_irq(&x->wait.lock);
  5017. if (!x->done)
  5018. ret = 0;
  5019. else
  5020. x->done--;
  5021. spin_unlock_irq(&x->wait.lock);
  5022. return ret;
  5023. }
  5024. EXPORT_SYMBOL(try_wait_for_completion);
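/*
 * Illustrative sketch, not part of the original file: using a completion
 * as a counting token pool, with try_wait_for_completion() as the
 * non-blocking "grab a token" operation described above.
 * example_grab_token is a hypothetical name.
 */
static int example_grab_token(struct completion *pool)
{
	if (try_wait_for_completion(pool))
		return 0;	/* consumed one 'done' token */
	return -EBUSY;		/* would have had to block; caller may retry */
}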
  5025. /**
  5026. * completion_done - Test to see if a completion has any waiters
  5027. * @x: completion structure
  5028. *
  5029. * Returns: 0 if there are waiters (wait_for_completion() in progress)
  5030. * 1 if there are no waiters.
  5031. *
  5032. */
  5033. bool completion_done(struct completion *x)
  5034. {
  5035. int ret = 1;
  5036. spin_lock_irq(&x->wait.lock);
  5037. if (!x->done)
  5038. ret = 0;
  5039. spin_unlock_irq(&x->wait.lock);
  5040. return ret;
  5041. }
  5042. EXPORT_SYMBOL(completion_done);
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
        unsigned long flags;
        wait_queue_t wait;

        init_waitqueue_entry(&wait, current);

        __set_current_state(state);

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, &wait);
        spin_unlock(&q->lock);
        timeout = schedule_timeout(timeout);
        spin_lock_irq(&q->lock);
        __remove_wait_queue(q, &wait);
        spin_unlock_irqrestore(&q->lock, flags);

        return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
        sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
        sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
        unsigned long flags;
        int oldprio, on_rq, running;
        struct rq *rq;
        const struct sched_class *prev_class = p->sched_class;

        BUG_ON(prio < 0 || prio > MAX_PRIO);

        rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);

        oldprio = p->prio;
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
        if (on_rq)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);

        if (rt_prio(prio))
                p->sched_class = &rt_sched_class;
        else
                p->sched_class = &fair_sched_class;

        p->prio = prio;

        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
                enqueue_task(rq, p, 0);

                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        task_rq_unlock(rq, &flags);
}

#endif

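/*
 * Illustrative sketch (hypothetical and heavily simplified; the real
 * rt_mutex code tracks far more state): a lock owner is boosted to the
 * top waiter's priority while contended, then deboosted on release by
 * recomputing its effective priority:
 *
 *      rt_mutex_setprio(owner, top_waiter_prio);
 *      ...
 *      rt_mutex_setprio(owner, rt_mutex_getprio(owner));
 */
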
void set_user_nice(struct task_struct *p, long nice)
{
        int old_prio, delta, on_rq;
        unsigned long flags;
        struct rq *rq;

        if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
                return;
        /*
         * We have to be careful: if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
        rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);
        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
         * it won't have any effect on scheduling until the task is
         * SCHED_FIFO/SCHED_RR:
         */
        if (task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
        on_rq = p->se.on_rq;
        if (on_rq)
                dequeue_task(rq, p, 0);

        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;

        if (on_rq) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
                        resched_task(rq->curr);
        }
out_unlock:
        task_rq_unlock(rq, &flags);
}
EXPORT_SYMBOL(set_user_nice);

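/*
 * Worked example (assumes the usual NICE_TO_PRIO() mapping of
 * prio = MAX_RT_PRIO + nice + 20, with MAX_RT_PRIO == 100): nice -20
 * maps to static_prio 100, nice 0 to 120 and nice +19 to 139. A kernel
 * caller demoting a helper thread ('p' is a made-up task pointer):
 *
 *      set_user_nice(p, 10);
 */
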
/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = 20 - nice;

        return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
                capable(CAP_SYS_NICE));
}

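/*
 * Worked example: requesting nice -5 gives nice_rlim = 20 - (-5) = 25,
 * so an unprivileged task needs RLIMIT_NICE >= 25 (or CAP_SYS_NICE) for
 * can_nice() to allow it.
 */
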
#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
        long nice, retval;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
        if (increment < -40)
                increment = -40;
        if (increment > 40)
                increment = 40;

        nice = TASK_NICE(current) + increment;
        if (nice < -20)
                nice = -20;
        if (nice > 19)
                nice = 19;

        if (increment < 0 && !can_nice(current, nice))
                return -EPERM;

        retval = security_task_setnice(current, nice);
        if (retval)
                return retval;

        set_user_nice(current, nice);
        return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
        return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
        return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
        return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
        return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
        return pid ? find_task_by_vpid(pid) : current;
}

/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
        BUG_ON(p->se.on_rq);

        p->policy = policy;
        switch (p->policy) {
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
                p->sched_class = &fair_sched_class;
                break;
        case SCHED_FIFO:
        case SCHED_RR:
                p->sched_class = &rt_sched_class;
                break;
        }

        p->rt_priority = prio;
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
        set_load_weight(p);
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
        const struct cred *cred = current_cred(), *pcred;
        bool match;

        rcu_read_lock();
        pcred = __task_cred(p);
        match = (cred->euid == pcred->euid ||
                 cred->euid == pcred->uid);
        rcu_read_unlock();
        return match;
}

static int __sched_setscheduler(struct task_struct *p, int policy,
                                struct sched_param *param, bool user)
{
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
        const struct sched_class *prev_class = p->sched_class;
        struct rq *rq;
        int reset_on_fork;

        /* may grab non-irq protected spin_locks */
        BUG_ON(in_interrupt());
recheck:
        /* double check policy once rq lock held */
        if (policy < 0) {
                reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
        } else {
                reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
                policy &= ~SCHED_RESET_ON_FORK;

                if (policy != SCHED_FIFO && policy != SCHED_RR &&
                    policy != SCHED_NORMAL && policy != SCHED_BATCH &&
                    policy != SCHED_IDLE)
                        return -EINVAL;
        }

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
         * SCHED_BATCH and SCHED_IDLE is 0.
         */
        if (param->sched_priority < 0 ||
            (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
        if (rt_policy(policy) != (param->sched_priority != 0))
                return -EINVAL;

        /*
         * Allow unprivileged RT tasks to decrease priority:
         */
        if (user && !capable(CAP_SYS_NICE)) {
                if (rt_policy(policy)) {
                        unsigned long rlim_rtprio;

                        if (!lock_task_sighand(p, &flags))
                                return -ESRCH;
                        rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
                        unlock_task_sighand(p, &flags);

                        /* can't set/change the rt policy */
                        if (policy != p->policy && !rlim_rtprio)
                                return -EPERM;

                        /* can't increase priority */
                        if (param->sched_priority > p->rt_priority &&
                            param->sched_priority > rlim_rtprio)
                                return -EPERM;
                }
                /*
                 * Like positive nice levels, don't allow tasks to
                 * move out of SCHED_IDLE either:
                 */
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
                        return -EPERM;

                /* can't change other user's priorities */
                if (!check_same_owner(p))
                        return -EPERM;

                /* Normal users shall not reset the sched_reset_on_fork flag */
                if (p->sched_reset_on_fork && !reset_on_fork)
                        return -EPERM;
        }

        if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
                /*
                 * Do not allow realtime tasks into groups that have no runtime
                 * assigned.
                 */
                if (rt_bandwidth_enabled() && rt_policy(policy) &&
                    task_group(p)->rt_bandwidth.rt_runtime == 0)
                        return -EPERM;
#endif

                retval = security_task_setscheduler(p, policy, param);
                if (retval)
                        return retval;
        }

        /*
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
        spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
        rq = __task_rq_lock(p);
        /* recheck policy now with rq lock held */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
                spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
        if (on_rq)
                deactivate_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);

        p->sched_reset_on_fork = reset_on_fork;

        oldprio = p->prio;
        __setscheduler(rq, p, policy, param->sched_priority);

        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
                activate_task(rq, p, 0);

                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
        spin_unlock_irqrestore(&p->pi_lock, flags);

        rt_mutex_adjust_pi(p);

        return 0;
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
                       struct sched_param *param)
{
        return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               struct sched_param *param)
{
        return __sched_setscheduler(p, policy, param, false);
}

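/*
 * Illustrative sketch (hypothetical caller; 'p' is a made-up task
 * pointer): promoting a kernel thread to SCHED_FIFO from kernel context,
 * where no permission or security check is wanted:
 *
 *      struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *      sched_setscheduler_nocheck(p, SCHED_FIFO, &sp);
 */
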
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
        struct sched_param lparam;
        struct task_struct *p;
        int retval;

        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;

        rcu_read_lock();
        retval = -ESRCH;
        p = find_process_by_pid(pid);
        if (p != NULL)
                retval = sched_setscheduler(p, policy, &lparam);
        rcu_read_unlock();

        return retval;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
                struct sched_param __user *, param)
{
        /* negative values for policy are not valid */
        if (policy < 0)
                return -EINVAL;

        return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
        return do_sched_setscheduler(pid, -1, param);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
        struct task_struct *p;
        int retval;

        if (pid < 0)
                return -EINVAL;

        retval = -ESRCH;
        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (p) {
                retval = security_task_getscheduler(p);
                if (!retval)
                        retval = p->policy
                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
        }
        read_unlock(&tasklist_lock);
        return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
        struct sched_param lp;
        struct task_struct *p;
        int retval;

        if (!param || pid < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
                goto out_unlock;

        retval = security_task_getscheduler(p);
        if (retval)
                goto out_unlock;

        lp.sched_priority = p->rt_priority;
        read_unlock(&tasklist_lock);

        /*
         * This one might sleep, we cannot do it with a spinlock held ...
         */
        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

        return retval;

out_unlock:
        read_unlock(&tasklist_lock);
        return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        cpumask_var_t cpus_allowed, new_mask;
        struct task_struct *p;
        int retval;

        get_online_cpus();
        read_lock(&tasklist_lock);

        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock(&tasklist_lock);
                put_online_cpus();
                return -ESRCH;
        }

        /*
         * It is not safe to call set_cpus_allowed with the
         * tasklist_lock held. We will bump the task_struct's
         * usage count and then drop tasklist_lock.
         */
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
                retval = -ENOMEM;
                goto out_put_task;
        }
        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
                retval = -ENOMEM;
                goto out_free_cpus_allowed;
        }
        retval = -EPERM;
        if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
                goto out_unlock;

        retval = security_task_setscheduler(p, 0, NULL);
        if (retval)
                goto out_unlock;

        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, in_mask, cpus_allowed);
again:
        retval = set_cpus_allowed_ptr(p, new_mask);

        if (!retval) {
                cpuset_cpus_allowed(p, cpus_allowed);
                if (!cpumask_subset(new_mask, cpus_allowed)) {
                        /*
                         * We must have raced with a concurrent cpuset
                         * update. Just reset the cpus_allowed to the
                         * cpuset's cpus_allowed.
                         */
                        cpumask_copy(new_mask, cpus_allowed);
                        goto again;
                }
        }
out_unlock:
        free_cpumask_var(new_mask);
out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
out_put_task:
        put_task_struct(p);
        put_online_cpus();
        return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
                             struct cpumask *new_mask)
{
        if (len < cpumask_size())
                cpumask_clear(new_mask);
        else if (len > cpumask_size())
                len = cpumask_size();

        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
{
        cpumask_var_t new_mask;
        int retval;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
        if (retval == 0)
                retval = sched_setaffinity(pid, new_mask);
        free_cpumask_var(new_mask);
        return retval;
}

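/*
 * Illustrative sketch (hypothetical in-kernel caller): pinning the task
 * identified by 'pid' to CPU 2 via the kernel-side helper above:
 *
 *      cpumask_var_t mask;
 *
 *      if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *              cpumask_clear(mask);
 *              cpumask_set_cpu(2, mask);
 *              sched_setaffinity(pid, mask);
 *              free_cpumask_var(mask);
 *      }
 */
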
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
        struct task_struct *p;
        int retval;

        get_online_cpus();
        read_lock(&tasklist_lock);

        retval = -ESRCH;
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;

        retval = security_task_getscheduler(p);
        if (retval)
                goto out_unlock;

        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);

out_unlock:
        read_unlock(&tasklist_lock);
        put_online_cpus();

        return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
{
        int ret;
        cpumask_var_t mask;

        if (len < cpumask_size())
                return -EINVAL;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
                        ret = -EFAULT;
                else
                        ret = cpumask_size();
        }
        free_cpumask_var(mask);

        return ret;
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
SYSCALL_DEFINE0(sched_yield)
{
        struct rq *rq = this_rq_lock();

        schedstat_inc(rq, yld_count);
        current->sched_class->yield_task(rq);

        /*
         * Since we are going to call schedule() anyway, there's
         * no need to preempt or enable interrupts:
         */
        __release(rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
        _raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();

        schedule();

        return 0;
}

static inline int should_resched(void)
{
        return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}

static void __cond_resched(void)
{
        add_preempt_count(PREEMPT_ACTIVE);
        schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
{
        if (should_resched()) {
                __cond_resched();
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_cond_resched);

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
        int resched = should_resched();
        int ret = 0;

        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
                else
                        cpu_relax();
                ret = 1;
                spin_lock(lock);
        }
        return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);

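/*
 * Illustrative sketch (hypothetical caller; 'mylock', 'nr_items' and
 * process_item() are made-up): a long scan under a spinlock can yield
 * both the lock and the CPU periodically. Note the lock may be dropped
 * and retaken, so any state it protects must be revalidated afterwards:
 *
 *      spin_lock(&mylock);
 *      for (i = 0; i < nr_items; i++) {
 *              process_item(i);
 *              __cond_resched_lock(&mylock);
 *      }
 *      spin_unlock(&mylock);
 */
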
int __sched __cond_resched_softirq(void)
{
        BUG_ON(!in_softirq());

        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
                local_bh_disable();
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);

/**
 * yield - yield the current processor to other threads.
 *
 * This is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void __sched yield(void)
{
        set_current_state(TASK_RUNNING);
        sys_sched_yield();
}
EXPORT_SYMBOL(yield);

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 *
 * But don't do that if it is a deliberate, throttling IO wait (this task
 * has set its backing_dev_info: the queue against which it should throttle)
 */
void __sched io_schedule(void)
{
        struct rq *rq = raw_rq();

        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        schedule();
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);

long __sched io_schedule_timeout(long timeout)
{
        struct rq *rq = raw_rq();
        long ret;

        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        ret = schedule_timeout(timeout);
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
        return ret;
}

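/*
 * Illustrative sketch (hypothetical caller): sleeping up to one second
 * for an I/O event while being accounted as iowait. schedule_timeout()
 * requires the task state to be set first:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      remaining = io_schedule_timeout(HZ);
 */
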
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
        int ret = -EINVAL;

        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                ret = MAX_USER_RT_PRIO-1;
                break;
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
                ret = 0;
                break;
        }
        return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
        int ret = -EINVAL;

        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                ret = 1;
                break;
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
                ret = 0;
        }
        return ret;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
{
        struct task_struct *p;
        unsigned int time_slice;
        int retval;
        struct timespec t;

        if (pid < 0)
                return -EINVAL;

        retval = -ESRCH;
        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;

        retval = security_task_getscheduler(p);
        if (retval)
                goto out_unlock;

        /*
         * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
         * tasks that are on an otherwise idle runqueue:
         */
        time_slice = 0;
        if (p->policy == SCHED_RR) {
                time_slice = DEF_TIMESLICE;
        } else if (p->policy != SCHED_FIFO) {
                struct sched_entity *se = &p->se;
                unsigned long flags;
                struct rq *rq;

                rq = task_rq_lock(p, &flags);
                if (rq->cfs.load.weight)
                        time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
                task_rq_unlock(rq, &flags);
        }
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
        return retval;

out_unlock:
        read_unlock(&tasklist_lock);
        return retval;
}

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

void sched_show_task(struct task_struct *p)
{
        unsigned long free = 0;
        unsigned state;

        state = p->state ? __ffs(p->state) + 1 : 0;
        printk(KERN_INFO "%-13.13s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
        if (state == TASK_RUNNING)
                printk(KERN_CONT " running  ");
        else
                printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
        if (state == TASK_RUNNING)
                printk(KERN_CONT "  running task    ");
        else
                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
#endif
        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent),
                (unsigned long)task_thread_info(p)->flags);

        show_stack(p, NULL);
}

void show_state_filter(unsigned long state_filter)
{
        struct task_struct *g, *p;

#if BITS_PER_LONG == 32
        printk(KERN_INFO
                "  task                PC stack   pid father\n");
#else
        printk(KERN_INFO
                "  task                        PC stack   pid father\n");
#endif
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
                 */
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
        } while_each_thread(g, p);

        touch_all_softlockup_watchdogs();

#ifdef CONFIG_SCHED_DEBUG
        sysrq_sched_debug_show();
#endif
        read_unlock(&tasklist_lock);
        /*
         * Only show locks if all tasks are dumped:
         */
        if (state_filter == -1)
                debug_show_all_locks();
}

void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
        idle->sched_class = &idle_sched_class;
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        spin_lock_irqsave(&rq->lock, flags);

        __sched_fork(idle);
        idle->se.exec_start = sched_clock();

        idle->prio = idle->normal_prio = MAX_PRIO;
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
        __set_task_cpu(idle, cpu);

        rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
#endif
        spin_unlock_irqrestore(&rq->lock, flags);

        /* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
        task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
        task_thread_info(idle)->preempt_count = 0;
#endif
        /*
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_task(idle);
}

/*
 * In a system that switches off the HZ timer nohz_cpu_mask
 * indicates which cpus entered this state. This is used
 * in the rcu update to wait only for active cpus. For systems
 * which do not switch off the HZ timer nohz_cpu_mask should
 * always be CPU_BITS_NONE.
 */
cpumask_var_t nohz_cpu_mask;

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static inline void sched_init_granularity(void)
{
        unsigned int factor = 1 + ilog2(num_online_cpus());
        const unsigned long limit = 200000000;

        sysctl_sched_min_granularity *= factor;
        if (sysctl_sched_min_granularity > limit)
                sysctl_sched_min_granularity = limit;

        sysctl_sched_latency *= factor;
        if (sysctl_sched_latency > limit)
                sysctl_sched_latency = limit;

        sysctl_sched_wakeup_granularity *= factor;

        sysctl_sched_shares_ratelimit *= factor;
}

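/*
 * Worked example: with 8 CPUs online, factor = 1 + ilog2(8) = 4, so each
 * tunable above is scaled 4x, capped at 200000000ns (200ms). On a
 * uniprocessor, factor = 1 + ilog2(1) = 1 and the values are unchanged.
 */
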
#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we queue a struct migration_req structure in the source CPU's
 *    runqueue and wake up that CPU's migration thread.
 * 2) we down() the locked semaphore => thread blocks.
 * 3) migration thread wakes up (implicitly it forces the migrated
 *    thread off the CPU)
 * 4) it gets the migration request and checks whether the migrated
 *    task is still in the wrong runqueue.
 * 5) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 6) migration thread up()s the semaphore.
 * 7) we wake up and the migration is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        struct migration_req req;
        unsigned long flags;
        struct rq *rq;
        int ret = 0;

        rq = task_rq_lock(p, &flags);
        if (!cpumask_intersects(new_mask, cpu_online_mask)) {
                ret = -EINVAL;
                goto out;
        }

        if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
                     !cpumask_equal(&p->cpus_allowed, new_mask))) {
                ret = -EINVAL;
                goto out;
        }

        if (p->sched_class->set_cpus_allowed)
                p->sched_class->set_cpus_allowed(p, new_mask);
        else {
                cpumask_copy(&p->cpus_allowed, new_mask);
                p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
        }

        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;

        if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
                /* Need help from migration thread: drop lock and wait. */
                struct task_struct *mt = rq->migration_thread;

                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
                wake_up_process(rq->migration_thread);
                put_task_struct(mt);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
                return 0;
        }
out:
        task_rq_unlock(rq, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

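/*
 * Illustrative sketch (hypothetical caller; 'worker' is a made-up task
 * pointer): restricting a freshly created kthread to CPU 3:
 *
 *      set_cpus_allowed_ptr(worker, cpumask_of(3));
 */
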
/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
        struct rq *rq_dest, *rq_src;
        int ret = 0, on_rq;

        if (unlikely(!cpu_active(dest_cpu)))
                return ret;

        rq_src = cpu_rq(src_cpu);
        rq_dest = cpu_rq(dest_cpu);

        double_rq_lock(rq_src, rq_dest);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
                goto done;
        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                goto fail;

        on_rq = p->se.on_rq;
        if (on_rq)
                deactivate_task(rq_src, p, 0);

        set_task_cpu(p, dest_cpu);
        if (on_rq) {
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
done:
        ret = 1;
fail:
        double_rq_unlock(rq_src, rq_dest);
        return ret;
}

/*
 * migration_thread - this is a highprio system thread that performs
 * thread migration by bumping thread off CPU then 'pushing' onto
 * another runqueue.
 */
static int migration_thread(void *data)
{
        int cpu = (long)data;
        struct rq *rq;

        rq = cpu_rq(cpu);
        BUG_ON(rq->migration_thread != current);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                struct migration_req *req;
                struct list_head *head;

                spin_lock_irq(&rq->lock);

                if (cpu_is_offline(cpu)) {
                        spin_unlock_irq(&rq->lock);
                        break;
                }

                if (rq->active_balance) {
                        active_load_balance(rq, cpu);
                        rq->active_balance = 0;
                }

                head = &rq->migration_queue;

                if (list_empty(head)) {
                        spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
                }
                req = list_entry(head->next, struct migration_req, list);
                list_del_init(head->next);

                spin_unlock(&rq->lock);
                __migrate_task(req->task, cpu, req->dest_cpu);
                local_irq_enable();

                complete(&req->done);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
{
        int ret;

        local_irq_disable();
        ret = __migrate_task(p, src_cpu, dest_cpu);
        local_irq_enable();
        return ret;
}

/*
 * Figure out where task on dead CPU should go, use force if necessary.
 */
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
        int dest_cpu;
        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));

again:
        /* Look for allowed, online CPU in same node. */
        for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                        goto move;

        /* Any allowed, online CPU? */
        dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
        if (dest_cpu < nr_cpu_ids)
                goto move;

        /* No more Mr. Nice Guy. */
        if (dest_cpu >= nr_cpu_ids) {
                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
                dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);

                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
                        printk(KERN_INFO "process %d (%s) no "
                               "longer affine to cpu%d\n",
                               task_pid_nr(p), p->comm, dead_cpu);
                }
        }

move:
        /* It can have affinity changed while we were choosing. */
        if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                goto again;
}

/*
 * While a dead CPU has no uninterruptible tasks queued at this point,
 * it might still have a nonzero ->nr_uninterruptible counter, because
 * for performance reasons the counter is not strictly tracking tasks to
 * their home CPUs. So we just add the counter to another CPU's counter,
 * to keep the global sum constant after CPU-down:
 */
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
        unsigned long flags;

        local_irq_save(flags);
        double_rq_lock(rq_src, rq_dest);
        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
        rq_src->nr_uninterruptible = 0;
        double_rq_unlock(rq_src, rq_dest);
        local_irq_restore(flags);
}

/* Run through task list and migrate tasks from the dead cpu. */
static void migrate_live_tasks(int src_cpu)
{
        struct task_struct *p, *t;

        read_lock(&tasklist_lock);

        do_each_thread(t, p) {
                if (p == current)
                        continue;

                if (task_cpu(p) == src_cpu)
                        move_task_off_dead_cpu(src_cpu, p);
        } while_each_thread(t, p);

        read_unlock(&tasklist_lock);
}

/*
 * Schedules idle task to be the next runnable task on current CPU.
 * It does so by boosting its priority to highest possible.
 * Used by CPU offline code.
 */
void sched_idle_next(void)
{
        int this_cpu = smp_processor_id();
        struct rq *rq = cpu_rq(this_cpu);
        struct task_struct *p = rq->idle;
        unsigned long flags;

        /* cpu has to be offline */
        BUG_ON(cpu_online(this_cpu));

        /*
         * Strictly not necessary since rest of the CPUs are stopped by now
         * and interrupts disabled on the current cpu.
         */
        spin_lock_irqsave(&rq->lock, flags);

        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);

        update_rq_clock(rq);
        activate_task(rq, p, 0);

        spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
        struct mm_struct *mm = current->active_mm;

        BUG_ON(cpu_online(smp_processor_id()));

        if (mm != &init_mm)
                switch_mm(mm, &init_mm, current);
        mmdrop(mm);
}

/* called under rq->lock with disabled interrupts */
static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
{
        struct rq *rq = cpu_rq(dead_cpu);

        /* Must be exiting, otherwise would be on tasklist. */
        BUG_ON(!p->exit_state);

        /* Cannot have done final schedule yet: would have vanished. */
        BUG_ON(p->state == TASK_DEAD);

        get_task_struct(p);

        /*
         * Drop lock around migration; if someone else moves it,
         * that's OK. No task can be added to this CPU, so iteration is
         * fine.
         */
        spin_unlock_irq(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
        spin_lock_irq(&rq->lock);

        put_task_struct(p);
}

/* release_task() removes task from tasklist, so we won't find dead tasks. */
static void migrate_dead_tasks(unsigned int dead_cpu)
{
        struct rq *rq = cpu_rq(dead_cpu);
        struct task_struct *next;

        for ( ; ; ) {
                if (!rq->nr_running)
                        break;
                update_rq_clock(rq);
                next = pick_next_task(rq);
                if (!next)
                        break;
                next->sched_class->put_prev_task(rq, next);
                migrate_dead(dead_cpu, next);
        }
}

/*
 * remove the tasks which were accounted by rq from calc_load_tasks.
 */
static void calc_global_load_remove(struct rq *rq)
{
        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
        rq->calc_load_active = 0;
}
#endif /* CONFIG_HOTPLUG_CPU */

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {0, },
};

static struct ctl_table sd_ctl_root[] = {
        {
                .ctl_name       = CTL_KERN,
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {0, },
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                mode_t mode, proc_handler *proc_handler)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(13);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0], "min_interval", &sd->min_interval,
                sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[1], "max_interval", &sd->max_interval,
                sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[9], "cache_nice_tries",
                &sd->cache_nice_tries,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[10], "flags", &sd->flags,
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[11], "name", sd->name,
                CORENAME_MAX_SIZE, 0444, proc_dostring);
        /* &table[12] is terminator */

        return table;
}

static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}

static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
        int i, cpu_num = num_online_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];

        WARN_ON(sd_ctl_dir[0].child);
        sd_ctl_dir[0].child = entry;

        if (entry == NULL)
                return;

        for_each_online_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_cpu_table(i);
                entry++;
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

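/*
 * Illustrative sketch of the resulting sysctl hierarchy (the exact
 * per-cpu domain directories depend on the machine's topology):
 *
 *      /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *      /proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *      /proc/sys/kernel/sched_domain/cpu0/domain1/name
 */
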
/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
        if (sd_sysctl_header)
                unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
        if (sd_ctl_dir[0].child)
                sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif

static void set_rq_online(struct rq *rq)
{
        if (!rq->online) {
                const struct sched_class *class;

                cpumask_set_cpu(rq->cpu, rq->rd->online);
                rq->online = 1;

                for_each_class(class) {
                        if (class->rq_online)
                                class->rq_online(rq);
                }
        }
}

static void set_rq_offline(struct rq *rq)
{
        if (rq->online) {
                const struct sched_class *class;

                for_each_class(class) {
                        if (class->rq_offline)
                                class->rq_offline(rq);
                }

                cpumask_clear_cpu(rq->cpu, rq->rd->online);
                rq->online = 0;
        }
}

/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        struct task_struct *p;
        int cpu = (long)hcpu;
        unsigned long flags;
        struct rq *rq;

        switch (action) {

        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
                kthread_bind(p, cpu);
                /* Must be high prio: stop_machine expects to yield to it. */
                rq = task_rq_lock(p, &flags);
                __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
                task_rq_unlock(rq, &flags);
                get_task_struct(p);
                cpu_rq(cpu)->migration_thread = p;
                rq->calc_load_update = calc_load_update;
                break;

        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);

                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

                        set_rq_online(rq);
                }
                spin_unlock_irqrestore(&rq->lock, flags);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!cpu_rq(cpu)->migration_thread)
                        break;
                /* Unbind it from offline cpu so it can run. Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             cpumask_any(cpu_online_mask));
                kthread_stop(cpu_rq(cpu)->migration_thread);
                put_task_struct(cpu_rq(cpu)->migration_thread);
                cpu_rq(cpu)->migration_thread = NULL;
                break;

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
                spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
                rq->idle->static_prio = MAX_PRIO;
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
                spin_unlock_irq(&rq->lock);
                cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
                calc_global_load_remove(rq);
                /*
                 * No need to migrate the tasks: it was best-effort if
                 * they didn't take sched_hotcpu_mutex. Just wake up
                 * the requestors.
                 */
                spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
                        struct migration_req *req;

                        req = list_entry(rq->migration_queue.next,
                                         struct migration_req, list);
                        list_del_init(&req->list);
                        spin_unlock_irq(&rq->lock);
                        complete(&req->done);
                        spin_lock_irq(&rq->lock);
                }
                spin_unlock_irq(&rq->lock);
                break;

        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
                spin_unlock_irqrestore(&rq->lock, flags);
                break;
#endif
        }
        return NOTIFY_OK;
}

/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_counter subsystem, though.
 */
static struct notifier_block __cpuinitdata migration_notifier = {
        .notifier_call = migration_call,
        .priority = 10
};

static int __init migration_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        /* Start one for the boot CPU: */
        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
        BUG_ON(err == NOTIFY_BAD);
        migration_call(&migration_notifier, CPU_ONLINE, cpu);
        register_cpu_notifier(&migration_notifier);

        return 0;
}
early_initcall(migration_init);
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_SCHED_DEBUG

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
{
        struct sched_group *group = sd->groups;
        char str[256];

        cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
        cpumask_clear(groupmask);

        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

        if (!(sd->flags & SD_LOAD_BALANCE)) {
                printk("does not load-balance\n");
                if (sd->parent)
                        printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
                                        " has parent");
                return -1;
        }

        printk(KERN_CONT "span %s level %s\n", str, sd->name);

        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
                                "CPU%d\n", cpu);
        }
        if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
                printk(KERN_ERR "ERROR: domain->groups does not contain"
                                " CPU%d\n", cpu);
        }

        printk(KERN_DEBUG "%*s groups:", level + 1, "");
        do {
                if (!group) {
                        printk("\n");
                        printk(KERN_ERR "ERROR: group is NULL\n");
                        break;
                }

                if (!group->__cpu_power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
                        break;
                }

                if (!cpumask_weight(sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
                        break;
                }

                if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: repeated CPUs\n");
                        break;
                }

                cpumask_or(groupmask, groupmask, sched_group_cpus(group));

                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

                printk(KERN_CONT " %s", str);
                if (group->__cpu_power != SCHED_LOAD_SCALE) {
                        printk(KERN_CONT " (__cpu_power = %d)",
                                group->__cpu_power);
                }

                group = group->next;
        } while (group != sd->groups);
        printk(KERN_CONT "\n");

        if (!cpumask_equal(sched_domain_span(sd), groupmask))
                printk(KERN_ERR "ERROR: groups don't span domain->span\n");

        if (sd->parent &&
            !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
                printk(KERN_ERR "ERROR: parent span is not a superset "
                        "of domain->span\n");
        return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
        cpumask_var_t groupmask;
        int level = 0;

        if (!sd) {
                printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
                return;
        }

        printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

        if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
                printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
                return;
        }

        for (;;) {
                if (sched_domain_debug_one(sd, cpu, level, groupmask))
                        break;
                level++;
                sd = sd->parent;
                if (!sd)
                        break;
        }
        free_cpumask_var(groupmask);
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif /* CONFIG_SCHED_DEBUG */

  6693. static int sd_degenerate(struct sched_domain *sd)
  6694. {
  6695. if (cpumask_weight(sched_domain_span(sd)) == 1)
  6696. return 1;
  6697. /* Following flags need at least 2 groups */
  6698. if (sd->flags & (SD_LOAD_BALANCE |
  6699. SD_BALANCE_NEWIDLE |
  6700. SD_BALANCE_FORK |
  6701. SD_BALANCE_EXEC |
  6702. SD_SHARE_CPUPOWER |
  6703. SD_SHARE_PKG_RESOURCES)) {
  6704. if (sd->groups != sd->groups->next)
  6705. return 0;
  6706. }
  6707. /* Following flags don't use groups */
  6708. if (sd->flags & (SD_WAKE_IDLE |
  6709. SD_WAKE_AFFINE |
  6710. SD_WAKE_BALANCE))
  6711. return 0;
  6712. return 1;
  6713. }
  6714. static int
  6715. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  6716. {
  6717. unsigned long cflags = sd->flags, pflags = parent->flags;
  6718. if (sd_degenerate(parent))
  6719. return 1;
  6720. if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
  6721. return 0;
  6722. /* Does parent contain flags not in child? */
  6723. /* WAKE_BALANCE is a subset of WAKE_AFFINE */
  6724. if (cflags & SD_WAKE_AFFINE)
  6725. pflags &= ~SD_WAKE_BALANCE;
  6726. /* Flags needing groups don't count if only 1 group in parent */
  6727. if (parent->groups == parent->groups->next) {
  6728. pflags &= ~(SD_LOAD_BALANCE |
  6729. SD_BALANCE_NEWIDLE |
  6730. SD_BALANCE_FORK |
  6731. SD_BALANCE_EXEC |
  6732. SD_SHARE_CPUPOWER |
  6733. SD_SHARE_PKG_RESOURCES);
  6734. if (nr_node_ids == 1)
  6735. pflags &= ~SD_SERIALIZE;
  6736. }
  6737. if (~cflags & pflags)
  6738. return 0;
  6739. return 1;
  6740. }
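
/*
 * A worked example of the subset test above (illustrative flag values):
 * with cflags = SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE and
 * pflags = SD_LOAD_BALANCE | SD_SERIALIZE, "~cflags & pflags" leaves
 * SD_SERIALIZE set, i.e. the parent carries a flag the child lacks,
 * still does useful work, and must not be collapsed into the child.
 */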
static void free_rootdomain(struct root_domain *rd)
{
	cpupri_cleanup(&rd->cpupri);

	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		free_rootdomain(old_rd);
}

static int init_rootdomain(struct root_domain *rd, bool bootmem)
{
	gfp_t gfp = GFP_KERNEL;

	memset(rd, 0, sizeof(*rd));

	if (bootmem)
		gfp = GFP_NOWAIT;

	if (!alloc_cpumask_var(&rd->span, gfp))
		goto out;
	if (!alloc_cpumask_var(&rd->online, gfp))
		goto free_span;
	if (!alloc_cpumask_var(&rd->rto_mask, gfp))
		goto free_online;

	if (cpupri_init(&rd->cpupri, bootmem) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain, true);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd, false) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		sd = sd->parent;
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	rcu_assign_pointer(rq->sd, sd);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);
/*
 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
 * to a function which identifies what group (along with the sched group) a
 * CPU belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
 * (due to the fact that we keep track of groups covered with a struct
 * cpumask).
 *
 * init_sched_build_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 */
static void
init_sched_build_groups(const struct cpumask *span,
			const struct cpumask *cpu_map,
			int (*group_fn)(int cpu, const struct cpumask *cpu_map,
					struct sched_group **sg,
					struct cpumask *tmpmask),
			struct cpumask *covered, struct cpumask *tmpmask)
{
	struct sched_group *first = NULL, *last = NULL;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group = group_fn(i, cpu_map, &sg, tmpmask);
		int j;

		if (cpumask_test_cpu(i, covered))
			continue;

		cpumask_clear(sched_group_cpus(sg));
		sg->__cpu_power = 0;

		for_each_cpu(j, span) {
			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}
		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
}

#define SD_NODES_PER_DOMAIN 16

#ifdef CONFIG_NUMA

/**
 * find_next_best_node - find the next node to include in a sched_domain
 * @node: node whose sched_domain we're building
 * @used_nodes: nodes already in the sched_domain
 *
 * Find the next node to include in a given scheduling domain. Simply
 * finds the closest node not already in the @used_nodes map.
 *
 * Should use nodemask_t.
 */
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
	int i, n, val, min_val, best_node = 0;

	min_val = INT_MAX;

	for (i = 0; i < nr_node_ids; i++) {
		/* Start at @node */
		n = (node + i) % nr_node_ids;

		if (!nr_cpus_node(n))
			continue;

		/* Skip already used nodes */
		if (node_isset(n, *used_nodes))
			continue;

		/* Simple min distance search */
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	node_set(best_node, *used_nodes);
	return best_node;
}
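
/*
 * A small worked example (hypothetical distances): building node 0's
 * span on a three-node box with node_distance(0, {0,1,2}) = {10, 20, 40}
 * and used_nodes = {0}, the loop skips node 0, takes the minimum of 20
 * and 40, and returns node 1, marking it used for the next call.
 */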
/**
 * sched_domain_node_span - get a cpumask for a node's sched_domain
 * @node: node whose cpumask we're constructing
 * @span: resulting cpumask
 *
 * Given a node, construct a good cpumask for its sched_domain to span. It
 * should be one that prevents unnecessary balancing, but also spreads tasks
 * out optimally.
 */
static void sched_domain_node_span(int node, struct cpumask *span)
{
	nodemask_t used_nodes;
	int i;

	cpumask_clear(span);
	nodes_clear(used_nodes);

	cpumask_or(span, span, cpumask_of_node(node));
	node_set(node, used_nodes);

	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
		int next_node = find_next_best_node(node, &used_nodes);

		cpumask_or(span, span, cpumask_of_node(next_node));
	}
}
#endif /* CONFIG_NUMA */

int sched_smt_power_savings = 0, sched_mc_power_savings = 0;

/*
 * The cpus mask in sched_group and sched_domain hangs off the end.
 *
 * ( See the comments in include/linux/sched.h:struct sched_group
 *   and struct sched_domain. )
 */
struct static_sched_group {
	struct sched_group sg;
	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
};

struct static_sched_domain {
	struct sched_domain sd;
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};

/*
 * SMT sched-domains:
 */
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);

static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
		 struct sched_group **sg, struct cpumask *unused)
{
	if (sg)
		*sg = &per_cpu(sched_group_cpus, cpu).sg;
	return cpu;
}
#endif /* CONFIG_SCHED_SMT */

/*
 * multi-core sched-domains:
 */
#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
#endif /* CONFIG_SCHED_MC */

#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;

	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
	return group;
}
#elif defined(CONFIG_SCHED_MC)
static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *unused)
{
	if (sg)
		*sg = &per_cpu(sched_group_core, cpu).sg;
	return cpu;
}
#endif

static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);

static int
cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;
#ifdef CONFIG_SCHED_MC
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
#else
	group = cpu;
#endif
	if (sg)
		*sg = &per_cpu(sched_group_phys, group).sg;
	return group;
}

#ifdef CONFIG_NUMA
/*
 * The init_sched_build_groups can't handle what we want to do with node
 * groups, so roll our own. Now each node has its own list of groups which
 * gets dynamically allocated.
 */
static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
static struct sched_group ***sched_group_nodes_bycpu;

static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);

static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
				 struct sched_group **sg,
				 struct cpumask *nodemask)
{
	int group;

	cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
	group = cpumask_first(nodemask);

	if (sg)
		*sg = &per_cpu(sched_group_allnodes, group).sg;
	return group;
}

static void init_numa_sched_groups_power(struct sched_group *group_head)
{
	struct sched_group *sg = group_head;
	int j;

	if (!sg)
		return;
	do {
		for_each_cpu(j, sched_group_cpus(sg)) {
			struct sched_domain *sd;

			sd = &per_cpu(phys_domains, j).sd;
			if (j != group_first_cpu(sd->groups)) {
				/*
				 * Only add "power" once for each
				 * physical package.
				 */
				continue;
			}

			sg_inc_cpu_power(sg, sd->groups->__cpu_power);
		}
		sg = sg->next;
	} while (sg != group_head);
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_NUMA
/* Free memory allocated for various sched_group structures */
static void free_sched_groups(const struct cpumask *cpu_map,
			      struct cpumask *nodemask)
{
	int cpu, i;

	for_each_cpu(cpu, cpu_map) {
		struct sched_group **sched_group_nodes
			= sched_group_nodes_bycpu[cpu];

		if (!sched_group_nodes)
			continue;

		for (i = 0; i < nr_node_ids; i++) {
			struct sched_group *oldsg, *sg = sched_group_nodes[i];

			cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
			if (cpumask_empty(nodemask))
				continue;

			if (sg == NULL)
				continue;
			sg = sg->next;
next_sg:
			oldsg = sg;
			sg = sg->next;
			kfree(oldsg);
			if (oldsg != sched_group_nodes[i])
				goto next_sg;
		}
		kfree(sched_group_nodes);
		sched_group_nodes_bycpu[cpu] = NULL;
	}
}
#else /* !CONFIG_NUMA */
static void free_sched_groups(const struct cpumask *cpu_map,
			      struct cpumask *nodemask)
{
}
#endif /* CONFIG_NUMA */

/*
 * Initialize sched groups cpu_power.
 *
 * cpu_power indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_power for all the groups in a sched domain will be the same
 * unless there are asymmetries in the topology. If there are asymmetries,
 * the group having more cpu_power will pick up more load compared to the
 * group having less cpu_power.
 *
 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
 * the maximum number of tasks a group can handle in the presence of other idle
 * or lightly loaded groups in the same sched domain.
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
	struct sched_domain *child;
	struct sched_group *group;

	WARN_ON(!sd || !sd->groups);

	if (cpu != group_first_cpu(sd->groups))
		return;

	child = sd->child;

	sd->groups->__cpu_power = 0;

	/*
	 * For perf policy, if the groups in child domain share resources
	 * (for example cores sharing some portions of the cache hierarchy
	 * or SMT), then set this domain's group cpu_power such that each
	 * group can handle only one task, when there are other idle groups
	 * in the same sched domain.
	 */
	if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
		       (child->flags &
			(SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
		sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
		return;
	}

	/*
	 * Add cpu_power of each child group to this group's cpu_power.
	 */
	group = child->groups;
	do {
		sg_inc_cpu_power(sd->groups, group->__cpu_power);
		group = group->next;
	} while (group != child->groups);
}
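
/*
 * Worked example (illustrative; assumes SCHED_LOAD_SCALE == 1024): a
 * domain whose child groups share package resources gets
 * __cpu_power == 1024 under the default perf policy -- one task per
 * group. If SD_POWERSAVINGS_BALANCE is set instead, the summation
 * branch runs, and two child groups of 1024 each yield
 * __cpu_power == 2048.
 */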
/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)		sd->name = #type
#else
# define SD_INIT_NAME(sd, type)		do { } while (0)
#endif

#define SD_INIT(sd, type)	sd_init_##type(sd)

#define SD_INIT_FUNC(type)	\
static noinline void sd_init_##type(struct sched_domain *sd)	\
{								\
	memset(sd, 0, sizeof(*sd));				\
	*sd = SD_##type##_INIT;					\
	sd->level = SD_LV_##type;				\
	SD_INIT_NAME(sd, type);					\
}

SD_INIT_FUNC(CPU)
#ifdef CONFIG_NUMA
 SD_INIT_FUNC(ALLNODES)
 SD_INIT_FUNC(NODE)
#endif
#ifdef CONFIG_SCHED_SMT
 SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
 SD_INIT_FUNC(MC)
#endif
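
/*
 * For reference, SD_INIT(sd, MC) expands to sd_init_MC(sd), which zeroes
 * *sd, copies the SD_MC_INIT template from the topology headers, sets
 * sd->level = SD_LV_MC and, under CONFIG_SCHED_DEBUG, names the domain
 * "MC" for the sched_domain_debug() output earlier in this file.
 */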
static int default_relax_domain_level = -1;

static int __init setup_relax_domain_level(char *str)
{
	unsigned long val;

	val = simple_strtoul(str, NULL, 0);
	if (val < SD_LV_MAX)
		default_relax_domain_level = val;

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
	}
}

/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus.
 */
static int __build_sched_domains(const struct cpumask *cpu_map,
				 struct sched_domain_attr *attr)
{
	int i, err = -ENOMEM;
	struct root_domain *rd;
	cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
		tmpmask;
#ifdef CONFIG_NUMA
	cpumask_var_t domainspan, covered, notcovered;
	struct sched_group **sched_group_nodes = NULL;
	int sd_allnodes = 0;

	if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&covered, GFP_KERNEL))
		goto free_domainspan;
	if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
		goto free_covered;
#endif

	if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
		goto free_notcovered;
	if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
		goto free_nodemask;
	if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
		goto free_this_sibling_map;
	if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
		goto free_this_core_map;
	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
		goto free_send_covered;

#ifdef CONFIG_NUMA
	/*
	 * Allocate the per-node list of sched groups
	 */
	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
				    GFP_KERNEL);
	if (!sched_group_nodes) {
		printk(KERN_WARNING "Can not alloc sched group node list\n");
		goto free_tmpmask;
	}
#endif

	rd = alloc_rootdomain();
	if (!rd) {
		printk(KERN_WARNING "Cannot alloc root domain\n");
		goto free_sched_groups;
	}

#ifdef CONFIG_NUMA
	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
#endif

	/*
	 * Set up domains for cpus specified by the cpu_map.
	 */
	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd = NULL, *p;

		cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);

#ifdef CONFIG_NUMA
		if (cpumask_weight(cpu_map) >
				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
			sd = &per_cpu(allnodes_domains, i).sd;
			SD_INIT(sd, ALLNODES);
			set_domain_attribute(sd, attr);
			cpumask_copy(sched_domain_span(sd), cpu_map);
			cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
			p = sd;
			sd_allnodes = 1;
		} else
			p = NULL;

		sd = &per_cpu(node_domains, i).sd;
		SD_INIT(sd, NODE);
		set_domain_attribute(sd, attr);
		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
		sd->parent = p;
		if (p)
			p->child = sd;
		cpumask_and(sched_domain_span(sd),
			    sched_domain_span(sd), cpu_map);
#endif

		p = sd;
		sd = &per_cpu(phys_domains, i).sd;
		SD_INIT(sd, CPU);
		set_domain_attribute(sd, attr);
		cpumask_copy(sched_domain_span(sd), nodemask);
		sd->parent = p;
		if (p)
			p->child = sd;
		cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);

#ifdef CONFIG_SCHED_MC
		p = sd;
		sd = &per_cpu(core_domains, i).sd;
		SD_INIT(sd, MC);
		set_domain_attribute(sd, attr);
		cpumask_and(sched_domain_span(sd), cpu_map,
			    cpu_coregroup_mask(i));
		sd->parent = p;
		p->child = sd;
		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
#endif

#ifdef CONFIG_SCHED_SMT
		p = sd;
		sd = &per_cpu(cpu_domains, i).sd;
		SD_INIT(sd, SIBLING);
		set_domain_attribute(sd, attr);
		cpumask_and(sched_domain_span(sd),
			    topology_thread_cpumask(i), cpu_map);
		sd->parent = p;
		p->child = sd;
		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
#endif
	}
#ifdef CONFIG_SCHED_SMT
	/* Set up CPU (sibling) groups */
	for_each_cpu(i, cpu_map) {
		cpumask_and(this_sibling_map,
			    topology_thread_cpumask(i), cpu_map);
		if (i != cpumask_first(this_sibling_map))
			continue;

		init_sched_build_groups(this_sibling_map, cpu_map,
					&cpu_to_cpu_group,
					send_covered, tmpmask);
	}
#endif

#ifdef CONFIG_SCHED_MC
	/* Set up multi-core groups */
	for_each_cpu(i, cpu_map) {
		cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
		if (i != cpumask_first(this_core_map))
			continue;

		init_sched_build_groups(this_core_map, cpu_map,
					&cpu_to_core_group,
					send_covered, tmpmask);
	}
#endif

	/* Set up physical groups */
	for (i = 0; i < nr_node_ids; i++) {
		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
		if (cpumask_empty(nodemask))
			continue;

		init_sched_build_groups(nodemask, cpu_map,
					&cpu_to_phys_group,
					send_covered, tmpmask);
	}

#ifdef CONFIG_NUMA
	/* Set up node groups */
	if (sd_allnodes) {
		init_sched_build_groups(cpu_map, cpu_map,
					&cpu_to_allnodes_group,
					send_covered, tmpmask);
	}

	for (i = 0; i < nr_node_ids; i++) {
		/* Set up node groups */
		struct sched_group *sg, *prev;
		int j;

		cpumask_clear(covered);
		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
		if (cpumask_empty(nodemask)) {
			sched_group_nodes[i] = NULL;
			continue;
		}

		sched_domain_node_span(i, domainspan);
		cpumask_and(domainspan, domainspan, cpu_map);

		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
				  GFP_KERNEL, i);
		if (!sg) {
			printk(KERN_WARNING "Can not alloc domain group for "
				"node %d\n", i);
			goto error;
		}
		sched_group_nodes[i] = sg;
		for_each_cpu(j, nodemask) {
			struct sched_domain *sd;

			sd = &per_cpu(node_domains, j).sd;
			sd->groups = sg;
		}
		sg->__cpu_power = 0;
		cpumask_copy(sched_group_cpus(sg), nodemask);
		sg->next = sg;
		cpumask_or(covered, covered, nodemask);
		prev = sg;

		for (j = 0; j < nr_node_ids; j++) {
			int n = (i + j) % nr_node_ids;

			cpumask_complement(notcovered, covered);
			cpumask_and(tmpmask, notcovered, cpu_map);
			cpumask_and(tmpmask, tmpmask, domainspan);
			if (cpumask_empty(tmpmask))
				break;

			cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
			if (cpumask_empty(tmpmask))
				continue;

			sg = kmalloc_node(sizeof(struct sched_group) +
					  cpumask_size(),
					  GFP_KERNEL, i);
			if (!sg) {
				printk(KERN_WARNING
				"Can not alloc domain group for node %d\n", j);
				goto error;
			}
			sg->__cpu_power = 0;
			cpumask_copy(sched_group_cpus(sg), tmpmask);
			sg->next = prev->next;
			cpumask_or(covered, covered, tmpmask);
			prev->next = sg;
			prev = sg;
		}
	}
#endif

	/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;

		init_sched_groups_power(i, sd);
	}
#endif
#ifdef CONFIG_SCHED_MC
	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd = &per_cpu(core_domains, i).sd;

		init_sched_groups_power(i, sd);
	}
#endif

	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd = &per_cpu(phys_domains, i).sd;

		init_sched_groups_power(i, sd);
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < nr_node_ids; i++)
		init_numa_sched_groups_power(sched_group_nodes[i]);

	if (sd_allnodes) {
		struct sched_group *sg;

		cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
							       tmpmask);
		init_numa_sched_groups_power(sg);
	}
#endif

	/* Attach the domains */
	for_each_cpu(i, cpu_map) {
		struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
		sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
		sd = &per_cpu(core_domains, i).sd;
#else
		sd = &per_cpu(phys_domains, i).sd;
#endif
		cpu_attach_domain(sd, rd, i);
	}

	err = 0;

free_tmpmask:
	free_cpumask_var(tmpmask);
free_send_covered:
	free_cpumask_var(send_covered);
free_this_core_map:
	free_cpumask_var(this_core_map);
free_this_sibling_map:
	free_cpumask_var(this_sibling_map);
free_nodemask:
	free_cpumask_var(nodemask);
free_notcovered:
#ifdef CONFIG_NUMA
	free_cpumask_var(notcovered);
free_covered:
	free_cpumask_var(covered);
free_domainspan:
	free_cpumask_var(domainspan);
out:
#endif
	return err;

free_sched_groups:
#ifdef CONFIG_NUMA
	kfree(sched_group_nodes);
#endif
	goto free_tmpmask;

#ifdef CONFIG_NUMA
error:
	free_sched_groups(cpu_map, tmpmask);
	free_rootdomain(rd);
	goto free_tmpmask;
#endif
}
static int build_sched_domains(const struct cpumask *cpu_map)
{
	return __build_sched_domains(cpu_map, NULL);
}

static struct cpumask *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	return 0;
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int arch_init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
	if (!doms_cur)
		doms_cur = fallback_doms;
	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
	dattr_cur = NULL;
	err = build_sched_domains(doms_cur);
	register_sched_domain_sysctl();

	return err;
}

static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
				       struct cpumask *tmpmask)
{
	free_sched_groups(cpu_map, tmpmask);
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map.
 * These cpus will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	/* Save because hotplug lock held. */
	static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
	int i;

	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	synchronize_sched();
	arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* fast path */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumasks of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be kmalloc'd. This routine takes
 * ownership of it and will kfree it when done with it. If the caller
 * failed the kmalloc call, then it can pass in doms_new == NULL &&
 * ndoms_new == 1, and partition_sched_domains() will fall back to
 * the single partition 'fallback_doms'; it also forces the domains
 * to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
/* FIXME: Change to struct cpumask *doms_new[] */
void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(&doms_cur[i], &doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur + i);
match1:
		;
	}

	if (doms_new == NULL) {
		ndoms_cur = 0;
		doms_new = fallback_doms;
		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < ndoms_cur && !new_topology; j++) {
			if (cpumask_equal(&doms_new[i], &doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* no match - add a new doms_new */
		__build_sched_domains(doms_new + i,
				      dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains */
	if (doms_cur != fallback_doms)
		kfree(doms_cur);
	kfree(dattr_cur);	/* kfree(NULL) is safe */
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
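
/*
 * A minimal caller sketch (illustrative; not code from this file): to
 * split an 8-CPU machine into two balanced partitions, a caller such as
 * the cpuset code would do roughly:
 *
 *	struct cpumask *doms = kmalloc(2 * cpumask_size(), GFP_KERNEL);
 *
 *	cpulist_parse("0-3", &doms[0]);
 *	cpulist_parse("4-7", &doms[1]);
 *	get_online_cpus();
 *	partition_sched_domains(2, doms, NULL);
 *	put_online_cpus();
 *
 * partition_sched_domains() then owns 'doms' and kfree()s it on the
 * next repartition.
 */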
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static void arch_reinit_sched_domains(void)
{
	get_online_cpus();

	/* Destroy domains first to force the rebuild */
	partition_sched_domains(0, NULL, NULL);

	rebuild_sched_domains();
	put_online_cpus();
}

static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
	unsigned int level = 0;

	if (sscanf(buf, "%u", &level) != 1)
		return -EINVAL;

	/*
	 * level is always positive, so there is no need to check for
	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
	 * What happens on a 0 or 1 byte write - do we need to check
	 * for count as well?
	 */
	if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
		return -EINVAL;

	if (smt)
		sched_smt_power_savings = level;
	else
		sched_mc_power_savings = level;

	arch_reinit_sched_domains();

	return count;
}
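
/*
 * Example (from userspace, with CONFIG_SCHED_MC): the attribute defined
 * below appears as /sys/devices/system/cpu/sched_mc_power_savings, so
 *
 *	# echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *
 * stores level 1 and rebuilds the sched domains through
 * arch_reinit_sched_domains().
 */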
#ifdef CONFIG_SCHED_MC
static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
					   char *page)
{
	return sprintf(page, "%u\n", sched_mc_power_savings);
}
static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
					    const char *buf, size_t count)
{
	return sched_power_savings_store(buf, count, 0);
}
static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
			 sched_mc_power_savings_show,
			 sched_mc_power_savings_store);
#endif

#ifdef CONFIG_SCHED_SMT
static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
					    char *page)
{
	return sprintf(page, "%u\n", sched_smt_power_savings);
}
static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
					     const char *buf, size_t count)
{
	return sched_power_savings_store(buf, count, 1);
}
static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
			 sched_smt_power_savings_show,
			 sched_smt_power_savings_store);
#endif

int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
{
	int err = 0;

#ifdef CONFIG_SCHED_SMT
	if (smt_capable())
		err = sysfs_create_file(&cls->kset.kobj,
					&attr_sched_smt_power_savings.attr);
#endif
#ifdef CONFIG_SCHED_MC
	if (!err && mc_capable())
		err = sysfs_create_file(&cls->kset.kobj,
					&attr_sched_mc_power_savings.attr);
#endif
	return err;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
#ifndef CONFIG_CPUSETS
/*
 * Add online and remove offline CPUs from the scheduler domains.
 * When cpusets are enabled they take over this function.
 */
static int update_sched_domains(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		partition_sched_domains(1, NULL, NULL);
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}
#endif

static int update_runtime(struct notifier_block *nfb,
			  unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		disable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		enable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

void __init sched_init_smp(void)
{
	cpumask_var_t non_isolated_cpus;

	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);

#if defined(CONFIG_NUMA)
	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
					  GFP_KERNEL);
	BUG_ON(sched_group_nodes_bycpu == NULL);
#endif
	get_online_cpus();
	mutex_lock(&sched_domains_mutex);
	arch_init_sched_domains(cpu_online_mask);
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
	mutex_unlock(&sched_domains_mutex);
	put_online_cpus();

#ifndef CONFIG_CPUSETS
	/* XXX: Theoretical race here - CPU may be hotplugged now */
	hotcpu_notifier(update_sched_domains, 0);
#endif

	/* RT runtime code needs to handle some hotplug events */
	hotcpu_notifier(update_runtime, 0);

	init_hrtick();

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	sched_init_granularity();
	free_cpumask_var(non_isolated_cpus);

	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
	init_sched_rt_class();
}
#else
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */

const_debug unsigned int sysctl_timer_migration = 1;

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
	cfs_rq->rq = rq;
#endif
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}
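
/*
 * Note on the seed value above: (u64)(-(1LL << 20)) starts min_vruntime
 * roughly one million units below the point where the unsigned 64-bit
 * counter wraps, so the wraparound handling in the vruntime comparisons
 * is exercised soon after boot instead of only after a very long uptime.
 */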
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
#ifdef CONFIG_SMP
	rt_rq->highest_prio.next = MAX_RT_PRIO;
#endif
#endif
#ifdef CONFIG_SMP
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	spin_lock_init(&rt_rq->rt_runtime_lock);

#ifdef CONFIG_RT_GROUP_SCHED
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
#endif
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			      struct sched_entity *se, int cpu, int add,
			      struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	tg->cfs_rq[cpu] = cfs_rq;
	init_cfs_rq(cfs_rq, rq);
	cfs_rq->tg = tg;
	if (add)
		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);

	tg->se[cpu] = se;
	/* se could be NULL for init_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	se->load.weight = tg->shares;
	se->load.inv_weight = 0;
	se->parent = parent;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
			     struct sched_rt_entity *rt_se, int cpu, int add,
			     struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	tg->rt_rq[cpu] = rt_rq;
	init_rt_rq(rt_rq, rq);
	rt_rq->tg = tg;
	rt_rq->rt_se = rt_se;
	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
	if (add)
		list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);

	tg->rt_se[cpu] = rt_se;
	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}
#endif
void __init sched_init(void)
{
	int i, j;
	unsigned long alloc_size = 0, ptr;

#ifdef CONFIG_FAIR_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_USER_SCHED
	alloc_size *= 2;
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	alloc_size += num_possible_cpus() * cpumask_size();
#endif
	/*
	 * As sched_init() is called before page_alloc is set up,
	 * we use kzalloc() with GFP_NOWAIT here.
	 */
	if (alloc_size) {
		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		init_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		init_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#ifdef CONFIG_USER_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
			ptr += cpumask_size();
		}
#endif /* CONFIG_CPUMASK_OFFSTACK */
	}

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&init_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#ifdef CONFIG_USER_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), RUNTIME_INF);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_GROUP_SCHED
	list_add(&init_task_group.list, &task_groups);
	INIT_LIST_HEAD(&init_task_group.children);

#ifdef CONFIG_USER_SCHED
	INIT_LIST_HEAD(&root_task_group.children);
	init_task_group.parent = &root_task_group;
	list_add(&init_task_group.siblings, &root_task_group.children);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_GROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		spin_lock_init(&rq->lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs, rq);
		init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
		init_task_group.shares = init_task_group_load;
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
#ifdef CONFIG_CGROUP_SCHED
		/*
		 * How much cpu bandwidth does init_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the cpu resources in the system. This
		 * overall system cpu resource is divided among the tasks of
		 * init_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if init_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting init_task_group's tasks sit
		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
		root_task_group.shares = NICE_0_LOAD;
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
		/*
		 * In case of task-groups formed through the user id of tasks,
		 * init_task_group represents tasks belonging to root user.
		 * Hence it forms a sibling of all subsequent groups formed.
		 * In this case, init_task_group gets only a fraction of
		 * overall system cpu resource, based on the weight assigned
		 * to root user's cpu share (INIT_TASK_GROUP_LOAD). This is
		 * accomplished by letting tasks of init_task_group sit in a
		 * separate cfs_rq (init_tg_cfs_rq) and having one entity
		 * represent this group of tasks in rq->cfs (i.e
		 * init_task_group->se[] != NULL).
		 */
		init_tg_cfs_entry(&init_task_group,
				&per_cpu(init_tg_cfs_rq, i),
				&per_cpu(init_sched_entity, i), i, 1,
				root_task_group.se[i]);

#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
#ifdef CONFIG_CGROUP_SCHED
		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
		init_tg_rt_entry(&init_task_group,
				&per_cpu(init_rt_rq, i),
				&per_cpu(init_sched_rt_entity, i), i, 1,
				root_task_group.rt_se[i]);
#endif
#endif

		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->post_schedule = 0;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->migration_thread = NULL;
		INIT_LIST_HEAD(&rq->migration_queue);
		rq_attach_root(rq, &def_root_domain);
#endif
		init_rq_hrtick(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

	set_load_weight(&init_task);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
#endif

#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
#endif

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;

	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ
	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */

	perf_counter_init();

	scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = preempt_count() & ~PREEMPT_ACTIVE;

	return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}

void __might_sleep(char *file, int line, int preempt_offset)
{
#ifdef in_atomic
	static unsigned long prev_jiffy;	/* ratelimiting */

	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
	    system_state != SYSTEM_RUNNING || oops_in_progress)
		return;
	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
			file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
	dump_stack();
#endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
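
/*
 * Typical use (illustrative; my_alloc() is a hypothetical helper, not
 * from this file): callers normally reach __might_sleep() through the
 * might_sleep() macro from <linux/kernel.h>, placed at the top of any
 * function that may block:
 *
 *	void *my_alloc(size_t size)
 *	{
 *		might_sleep();
 *		return kmalloc(size, GFP_KERNEL);
 *	}
 *
 * Calling my_alloc() under a spinlock or from interrupt context with
 * CONFIG_DEBUG_SPINLOCK_SLEEP enabled then triggers the "BUG: sleeping
 * function called from invalid context" report above, rate-limited to
 * once per second by prev_jiffy.
 */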
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
	int on_rq;

	update_rq_clock(rq);
	on_rq = p->se.on_rq;
	if (on_rq)
		deactivate_task(rq, p, 0);
	__setscheduler(rq, p, SCHED_NORMAL, 0);
	if (on_rq) {
		activate_task(rq, p, 0);
		resched_task(rq->curr);
	}
}

void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

		p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
		p->se.wait_start = 0;
		p->se.sleep_start = 0;
		p->se.block_start = 0;
#endif

		if (!rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (TASK_NICE(p) < 0 && p->mm)
				set_user_nice(p, 0);
			continue;
		}

		spin_lock(&p->pi_lock);
		rq = __task_rq_lock(p);

		normalize_task(rq, p);

		__task_rq_unlock(rq);
		spin_unlock(&p->pi_lock);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */

#ifdef CONFIG_IA64
/*
 * These functions are only useful for the IA64 MCA handling.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

static
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	struct rq *rq;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);

		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err;

		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
	}

	return 1;

err:
	return 0;
}

static inline void register_fair_sched_group(struct task_group *tg, int cpu)
{
	list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
			&cpu_rq(cpu)->leaf_cfs_rq_list);
}

static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline void free_fair_sched_group(struct task_group *tg)
{
}

static inline
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

static inline void register_fair_sched_group(struct task_group *tg, int cpu)
{
}

static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static void free_rt_sched_group(struct task_group *tg)
{
	int i;

	destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

static
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	struct rq *rq;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);

		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err;

		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
	}

	return 1;

err:
	return 0;
}

static inline void register_rt_sched_group(struct task_group *tg, int cpu)
{
	list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
			&cpu_rq(cpu)->leaf_rt_rq_list);
}

static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
{
	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline void free_rt_sched_group(struct task_group *tg)
{
}

static inline
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

static inline void register_rt_sched_group(struct task_group *tg, int cpu)
{
}

static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_GROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	kfree(tg);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;
	unsigned long flags;
	int i;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i) {
		register_fair_sched_group(tg, i);
		register_rt_sched_group(tg, i);
	}
	list_add_rcu(&tg->list, &task_groups);

	WARN_ON(!parent); /* root should already exist */

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	return tg;

err:
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}

/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
	/* now it should be safe to free those cfs_rqs */
	free_sched_group(container_of(rhp, struct task_group, rcu));
}

/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i) {
		unregister_fair_sched_group(tg, i);
		unregister_rt_sched_group(tg, i);
	}
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);

	/* wait for possible concurrent references to cfs_rqs to complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}
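
/*
 * Illustrative sketch only: creating and tearing down a child of the
 * initial task group, using the ERR_PTR convention above.  "my_tg" is a
 * hypothetical caller-side variable, not a kernel symbol.
 */
#if 0
	struct task_group *my_tg = sched_create_group(&init_task_group);

	if (IS_ERR(my_tg))
		return PTR_ERR(my_tg);
	/* ... move tasks in with sched_move_task() ... */
	sched_destroy_group(my_tg);	/* actual freeing happens via RCU */
#endif
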
/*
 * Change a task's runqueue when it moves between groups.
 *	The caller of this function should have put the task in its new group
 *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 *	reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	update_rq_clock(rq);

	running = task_current(rq, tsk);
	on_rq = tsk->se.on_rq;

	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);

	set_task_rq(tsk, task_cpu(tsk));

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->moved_group)
		tsk->sched_class->moved_group(tsk);
#endif

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, &flags);
}
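
/*
 * The function above follows the usual "change attributes of a queued
 * task" pattern in this file: dequeue (and drop curr status) -> switch
 * the group linkage -> re-enqueue (and restore curr status), all under
 * the rq lock so the task cannot run in between.
 */
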
#endif /* CONFIG_GROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
{
	struct cfs_rq *cfs_rq = se->cfs_rq;
	int on_rq;

	on_rq = se->on_rq;
	if (on_rq)
		dequeue_entity(cfs_rq, se, 0);

	se->load.weight = shares;
	se->load.inv_weight = 0;

	if (on_rq)
		enqueue_entity(cfs_rq, se, 0);
}

static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
	struct cfs_rq *cfs_rq = se->cfs_rq;
	struct rq *rq = cfs_rq->rq;
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__set_se_shares(se, shares);
	spin_unlock_irqrestore(&rq->lock, flags);
}

static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	else if (shares > MAX_SHARES)
		shares = MAX_SHARES;

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);

	/* wait for any ongoing reference to this group to finish */
	synchronize_sched();

	/*
	 * Now we are free to modify the group's share on each cpu
	 * w/o tripping rebalance_share or load_balance_fair.
	 */
	tg->shares = shares;
	for_each_possible_cpu(i) {
		/*
		 * force a rebalance
		 */
		cfs_rq_set_shares(tg->cfs_rq[i], 0);
		set_se_shares(tg->se[i], shares);
	}

	/*
	 * Enable load balance activity on this group, by inserting it back on
	 * each cpu's rq->leaf_cfs_rq_list.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i)
		register_fair_sched_group(tg, i);
	list_add_rcu(&tg->siblings, &tg->parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);
done:
	mutex_unlock(&shares_mutex);
	return 0;
}
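
/*
 * Worked example: with the default weight NICE_0_LOAD == 1024, a call
 * such as sched_group_set_shares(tg, 2048) gives tg twice the cpu weight
 * of a default sibling group.  Out-of-range values are clamped to
 * [MIN_SHARES, MAX_SHARES] rather than rejected, so a request of 0
 * silently becomes MIN_SHARES.
 */
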
unsigned long sched_group_shares(struct task_group *tg)
{
	return tg->shares;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}
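
/*
 * Worked example: to_ratio() expresses bandwidth as a fixed-point
 * fraction with 20 fractional bits.  For the default global limits
 * (period = 1s, runtime = 0.95s):
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		= 996147		(~ 0.95 * 2^20)
 *
 * RUNTIME_INF maps to exactly 1 << 20, i.e. 100%.
 */
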
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

#ifdef CONFIG_USER_SCHED
	if (tg == &root_task_group) {
		period = global_rt_period();
		runtime = global_rt_runtime();
	}
#endif

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}
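
/*
 * Example of the hierarchical check above: if, in to_ratio() units, the
 * global limit is 0.95 and group A holds 0.5, then A's children may be
 * granted at most 0.5 in total -- 0.3 + 0.2 passes, while 0.3 + 0.3
 * trips the "sum > total" test and the whole update fails with -EINVAL.
 */
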
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	return walk_tg_tree(tg_schedulable, tg_nop, &data);
}

static int tg_set_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
	spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
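
/*
 * The four helpers above convert between the microsecond units of the
 * cgroup interface and the nanosecond units used internally: a write of
 * 950000 us is stored as 950000000 ns, a negative runtime is mapped to
 * RUNTIME_INF, and an infinite runtime reads back as -1.
 */
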
static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There's always some RT tasks in the root group
	 * -- migration, kstopmachine etc..
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
	spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

int sched_rt_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}
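
/*
 * The handler above makes sysctl updates transactional: if the new
 * values fail admission, the old ones are restored.  With the defaults
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, rt
 * tasks may use at most 95% of every period; writing a runtime larger
 * than the period is rejected and the previous settings remain.
 */
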
#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &init_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		      struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
		return -EINVAL;
#else
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
		return -EINVAL;
#endif
	return 0;
}

static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		  struct cgroup *old_cont, struct task_struct *tsk)
{
	sched_move_task(tsk);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) tg->shares;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
		u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};

static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name = "cpu",
	.create = cpu_cgroup_create,
	.destroy = cpu_cgroup_destroy,
	.can_attach = cpu_cgroup_can_attach,
	.attach = cpu_cgroup_attach,
	.populate = cpu_cgroup_populate,
	.subsys_id = cpu_cgroup_subsys_id,
	.early_init = 1,
};
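
/*
 * Userspace view (illustrative): with the cpu subsystem mounted at a
 * hypothetical /cgroup, the cftypes above appear as per-group files that
 * route to the handlers defined earlier, e.g.:
 *
 *	echo 2048 > /cgroup/mygroup/cpu.shares		-> cpu_shares_write_u64()
 *	echo 500000 > /cgroup/mygroup/cpu.rt_runtime_us	-> cpu_rt_runtime_write()
 *	cat /cgroup/mygroup/cpu.rt_period_us		-> cpu_rt_period_read_uint()
 */
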
#endif	/* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 *cpuusage;
	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
	struct cpuacct *parent;
};

struct cgroup_subsys cpuacct_subsys;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	int i;

	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		if (percpu_counter_init(&ca->cpustat[i], 0))
			goto out_free_counters;

	if (cgrp->parent)
		ca->parent = cgroup_ca(cgrp->parent);

	return &ca->css;

out_free_counters:
	while (--i >= 0)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
	kfree(ca);
}

static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}
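
/*
 * Why the lock on 32-bit: there a 64-bit counter is loaded and stored as
 * two 32-bit halves, so an unlocked reader could see a torn value (old
 * low word with new high word, off by 2^32).  rq->lock is the natural
 * choice because the writer, cpuacct_charge(), already runs with it
 * held.
 */
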
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
		struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
		s64 val = percpu_counter_read(&ca->cpustat[i]);
		val = cputime64_to_clock_t(val);
		cb->fill(cb, cpuacct_stat_desc[i], val);
	}
	return 0;
}

static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = ca->parent) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}
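
/*
 * Because the loop above walks ca->parent up to the root, every level of
 * the hierarchy is charged: 1ms consumed by a task in group /a/b is
 * added to the cpuusage of b, of a, and of the root group alike.
 */
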
/*
 * Charge the system/user time to the task's accounting group.
 */
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val)
{
	struct cpuacct *ca;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(tsk);

	do {
		percpu_counter_add(&ca->cpustat[idx], val);
		ca = ca->parent;
	} while (ca);
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name = "cpuacct",
	.create = cpuacct_create,
	.destroy = cpuacct_destroy,
	.populate = cpuacct_populate,
	.subsys_id = cpuacct_subsys_id,
};
#endif	/* CONFIG_CGROUP_CPUACCT */