
/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
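
/*
 * Usage note (added commentary, not part of the original source): with
 * CONFIG_SCHED_DEBUG enabled and debugfs mounted at the usual
 * /sys/kernel/debug, the file created above can be used to inspect and
 * toggle feature bits from userspace:
 *
 *	cat  /sys/kernel/debug/sched_features
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * sched_feat_write() accepts a single feature name per write, optionally
 * prefixed with "NO_" to clear the bit instead of setting it.
 */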
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}
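
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * callers pair task_rq_lock() with task_rq_unlock() below, which keeps @p
 * pinned to its runqueue for the whole critical section while interrupts
 * stay disabled via p->pi_lock:
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p cannot change runqueue here ...
 *	task_rq_unlock(rq, p, &flags);
 */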
static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */
/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();
	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

#else /* CONFIG_NO_HZ */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ */
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
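
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a typical caller walks the whole hierarchy from the root task group,
 * supplying a "down" visitor and using tg_nop() when nothing needs to be
 * done on the way back up.  The visitor name below is hypothetical:
 *
 *	static int my_down_visitor(struct task_group *tg, void *data)
 *	{
 *		... per-group work; return non-zero to abort the walk ...
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 */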
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in account_system_vtime, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/account_system_vtime on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
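
/*
 * Note (added commentary, not in the original source): the seqcount in the
 * !CONFIG_64BIT variant above exists because a 64-bit load is not atomic on
 * 32-bit architectures; a remote reader could otherwise observe a torn
 * value while this CPU updates cpu_hardirq_time/cpu_softirq_time.  On
 * 64-bit kernels the plain sum in irq_time_read() is sufficient.
 */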
  682. /*
  683. * Called before incrementing preempt_count on {soft,}irq_enter
  684. * and before decrementing preempt_count on {soft,}irq_exit.
  685. */
  686. void account_system_vtime(struct task_struct *curr)
  687. {
  688. unsigned long flags;
  689. s64 delta;
  690. int cpu;
  691. if (!sched_clock_irqtime)
  692. return;
  693. local_irq_save(flags);
  694. cpu = smp_processor_id();
  695. delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
  696. __this_cpu_add(irq_start_time, delta);
  697. irq_time_write_begin();
  698. /*
  699. * We do not account for softirq time from ksoftirqd here.
  700. * We want to continue accounting softirq time to ksoftirqd thread
  701. * in that case, so as not to confuse scheduler with a special task
  702. * that do not consume any time, but still wants to run.
  703. */
  704. if (hardirq_count())
  705. __this_cpu_add(cpu_hardirq_time, delta);
  706. else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
  707. __this_cpu_add(cpu_softirq_time, delta);
  708. irq_time_write_end();
  709. local_irq_restore(flags);
  710. }
  711. EXPORT_SYMBOL_GPL(account_system_vtime);
  712. #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
        if (unlikely(steal > NSEC_PER_SEC))
                return div_u64(steal, TICK_NSEC);

        return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif
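
/*
 * Worked example (illustrative numbers): with HZ=1000, TICK_NSEC is
 * 1,000,000ns. A steal delta of 2,500,000ns therefore yields
 * steal_ticks() == 2, with __iter_div_u64_rem() subtracting TICK_NSEC
 * iteratively -- cheap for the common small deltas. Only when more than
 * a second of steal time has accumulated do we pay for a full 64-bit
 * division via div_u64().
 */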
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update(). But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

        /*
         * Since irq_time is only updated on {soft,}irq_exit, we might run into
         * this case when a previous update_rq_clock() happened inside a
         * {soft,}irq region.
         *
         * When this happens, we stop ->clock_task and only update the
         * prev_irq_time stamp to account for the part that fit, so that a next
         * update will consume the rest. This ensures ->clock_task is
         * monotonic.
         *
         * It does however cause some slight mis-attribution of {soft,}irq
         * time; a more accurate solution would be to update the irq_time using
         * the current rq->clock timestamp, except that would require using
         * atomic ops.
         */
        if (irq_delta > delta)
                irq_delta = delta;

        rq->prev_irq_time += irq_delta;
        delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
                u64 st;

                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                st = steal_ticks(steal);
                steal = st * TICK_NSEC;

                rq->prev_steal_time_rq += steal;
                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
                sched_rt_avg_update(rq, irq_delta + steal);
#endif
}
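
/*
 * Worked example (illustrative numbers): suppose the previous
 * update_rq_clock() ran while a hardirq was still in flight. When that
 * irq exits, its time lands in cpu_hardirq_time, so the next update may
 * see irq_delta = 300us while rq->clock only advanced delta = 250us.
 * The clamp above charges 250us to irq time now, advances ->clock_task
 * by 0, and leaves the remaining 50us of irq_delta to be consumed by
 * the next update -- keeping ->clock_task monotonic at the cost of a
 * slightly late attribution.
 */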
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int irqtime_account_hi_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

#endif
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct task_struct *old_stop = cpu_rq(cpu)->stop;

        if (stop) {
                /*
                 * Make it appear like a SCHED_FIFO task, it's something
                 * userspace knows about and won't get confused about.
                 *
                 * Also, it will make PI more or less work without too
                 * much confusion -- but then, stop work should not
                 * rely on PI working anyway.
                 */
                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

                stop->sched_class = &stop_sched_class;
        }

        cpu_rq(cpu)->stop = stop;

        if (old_stop) {
                /*
                 * Reset it back to a normal scheduling class so that
                 * it can die in pieces.
                 */
                old_stop->sched_class = &rt_sched_class;
        }
}
/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
        return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        int prio;

        if (task_has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
        return prio;
}
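
/*
 * Worked example (illustrative, assuming the usual MAX_RT_PRIO of 100):
 * an RT task with rt_priority 50 maps to prio 100-1-50 = 49, so a higher
 * user-visible rt_priority yields a numerically *lower* (more important)
 * kernel prio. A SCHED_NORMAL task at nice 0 keeps its static_prio of
 * 120 (NICE_TO_PRIO(0)), since __normal_prio() just passes it through.
 */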
/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
{
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio)
                p->sched_class->prio_changed(rq, p, oldprio);
}
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        const struct sched_class *class;

        if (p->sched_class == rq->curr->sched_class) {
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
        } else {
                for_each_class(class) {
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
                                resched_task(rq->curr);
                                break;
                        }
                }
        }

        /*
         * A queue event has occurred, and we're going to schedule. In
         * this case, we can save a useless back-to-back clock update.
         */
        if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
                rq->skip_clock_update = 1;
}
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
        /*
         * We should never call set_task_cpu() on a blocked task,
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
        /*
         * The caller should hold either p->pi_lock or rq->lock, when changing
         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
         *
         * sched_move_task() holds both and thus holding either pins the cgroup,
         * see task_group().
         *
         * Furthermore, all task_rq users should acquire both locks, see
         * task_rq_lock().
         */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

        trace_sched_migrate_task(p, new_cpu);

        if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
        }

        __set_task_cpu(p, new_cpu);
}
struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        unsigned long flags;
        int running, on_rq;
        unsigned long ncsw;
        struct rq *rq;

        for (;;) {
                /*
                 * We do the initial early heuristics without holding
                 * any task-queue locks at all. We'll only try to get
                 * the runqueue lock when things look like they will
                 * work out!
                 */
                rq = task_rq(p);

                /*
                 * If the task is actively running on another CPU
                 * still, just relax and busy-wait without holding
                 * any locks.
                 *
                 * NOTE! Since we don't hold any locks, it's not
                 * even sure that "rq" stays as the right runqueue!
                 * But we don't care, since "task_running()" will
                 * return false if the runqueue has changed and p
                 * is actually now running somewhere else!
                 */
                while (task_running(rq, p)) {
                        if (match_state && unlikely(p->state != match_state))
                                return 0;
                        cpu_relax();
                }

                /*
                 * Ok, time to look more closely! We need the rq
                 * lock now, to be *sure*. If we're wrong, we'll
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->on_rq;
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, p, &flags);

                /*
                 * If it changed from the expected state, bail out now.
                 */
                if (unlikely(!ncsw))
                        break;

                /*
                 * Was it really running after all now that we
                 * checked with the proper locks actually held?
                 *
                 * Oops. Go back and try again..
                 */
                if (unlikely(running)) {
                        cpu_relax();
                        continue;
                }

                /*
                 * It's not enough that it's not actively running,
                 * it must be off the runqueue _entirely_, and not
                 * preempted!
                 *
                 * So if it was still runnable (but just not actively
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
                if (unlikely(on_rq)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                        continue;
                }

                /*
                 * Ahh, all good. It wasn't running, and it wasn't
                 * runnable, which means that it will never become
                 * running in the future either. We're all done!
                 */
                break;
        }

        return ncsw;
}
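
/*
 * Usage sketch (illustrative, not from this file): a typical caller
 * double-checks the switch count to prove the task stayed off-CPU:
 *
 *      unsigned long ncsw, ncsw2;
 *
 *      ncsw = wait_task_inactive(p, TASK_TRACED);
 *      if (!ncsw)
 *              return -ESRCH;          // state changed under us
 *      // ... work that must not race with p running ...
 *      ncsw2 = wait_task_inactive(p, TASK_TRACED);
 *      if (ncsw2 != ncsw)
 *              return -EAGAIN;         // p got scheduled in between
 *
 * The MSB set via "p->nvcsw | LONG_MIN" above guarantees a nonzero
 * return even when nvcsw itself happens to be 0.
 */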
/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
        enum { cpuset, possible, fail } state = cpuset;
        int dest_cpu;

        /* Look for allowed, online CPU in same node. */
        for_each_cpu(dest_cpu, nodemask) {
                if (!cpu_online(dest_cpu))
                        continue;
                if (!cpu_active(dest_cpu))
                        continue;
                if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                        return dest_cpu;
        }

        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        goto out;
                }

                switch (state) {
                case cpuset:
                        /* No more Mr. Nice Guy. */
                        cpuset_cpus_allowed_fallback(p);
                        state = possible;
                        break;

                case possible:
                        do_set_cpus_allowed(p, cpu_possible_mask);
                        state = fail;
                        break;

                case fail:
                        BUG();
                        break;
                }
        }

out:
        if (state != cpuset) {
                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
                 * leave the kernel.
                 */
                if (p->mm && printk_ratelimit()) {
                        printk_sched("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }

        return dest_cpu;
}
/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
        int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

        /*
         * In order not to call set_task_cpu() on a blocking task we need
         * to rely on ttwu() to place the task on a valid ->cpus_allowed
         * cpu.
         *
         * Since this is common to all placement strategies, this lives here.
         *
         * [ this allows ->select_task() to simply return task_cpu(p) and
         *   not worry about this generic constraint ]
         */
        if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
                     !cpu_online(cpu)))
                cpu = select_fallback_rq(task_cpu(p), p);

        return cpu;
}
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}
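
/*
 * Worked example (illustrative): this is an exponential moving average
 * with weight 1/8, i.e. avg += (sample - avg)/8. Starting from avg = 0
 * and feeding a constant sample of 800 gives 100, 187, 263, ... and
 * converges towards 800. Negative diffs decay at the same 1/8 rate, so
 * rq->avg_idle tracks recent idle periods rather than the whole history.
 */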
#endif

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
        struct rq *rq = this_rq();

#ifdef CONFIG_SMP
        int this_cpu = smp_processor_id();

        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                schedstat_inc(p, se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;

                schedstat_inc(p, se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(this_cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                schedstat_inc(sd, ttwu_wake_remote);
                                break;
                        }
                }
                rcu_read_unlock();
        }

        if (wake_flags & WF_MIGRATED)
                schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

        schedstat_inc(rq, ttwu_count);
        schedstat_inc(p, se.statistics.nr_wakeups);

        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
        activate_task(rq, p, en_flags);
        p->on_rq = 1;

        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
        trace_sched_wakeup(p, true);
        check_preempt_curr(rq, p, wake_flags);

        p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);

        if (rq->idle_stamp) {
                u64 delta = rq->clock - rq->idle_stamp;
                u64 max = 2*sysctl_sched_migration_cost;

                if (delta > max)
                        rq->avg_idle = max;
                else
                        update_avg(&rq->avg_idle, delta);
                rq->idle_stamp = 0;
        }
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
#endif

        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
        ttwu_do_wakeup(rq, p, wake_flags);
}
/*
 * Called in case the task @p isn't fully descheduled from its runqueue;
 * in that case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING: the task
 * is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
        struct rq *rq;
        int ret = 0;

        rq = __task_rq_lock(p);
        if (p->on_rq) {
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
        __task_rq_unlock(rq);

        return ret;
}
#ifdef CONFIG_SMP
static void sched_ttwu_pending(void)
{
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
        struct task_struct *p;

        raw_spin_lock(&rq->lock);

        while (llist) {
                p = llist_entry(llist, struct task_struct, wake_entry);
                llist = llist_next(llist);
                ttwu_do_activate(rq, p, 0);
        }

        raw_spin_unlock(&rq->lock);
}

void scheduler_ipi(void)
{
        if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;

        /*
         * Not all reschedule IPI handlers call irq_enter/irq_exit, since
         * traditionally all their work was done from the interrupt return
         * path. Now that we actually do some work, we need to make sure
         * we do call them.
         *
         * Some archs already do call them, luckily irq_enter/exit nest
         * properly.
         *
         * Arguably we should visit all archs and update all handlers,
         * however a fair share of IPIs are still resched only so this would
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
        sched_ttwu_pending();

        /*
         * Check if someone kicked us for doing the nohz idle load balance.
         */
        if (unlikely(got_nohz_idle_kick() && !need_resched())) {
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
        irq_exit();
}
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
        if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
                smp_send_reschedule(cpu);
}

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
{
        struct rq *rq;
        int ret = 0;

        rq = __task_rq_lock(p);
        if (p->on_cpu) {
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
        __task_rq_unlock(rq);

        return ret;
}
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */

bool cpus_share_cache(int this_cpu, int that_cpu)
{
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
{
        struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
                sched_clock_cpu(cpu); /* sync clocks x-cpu */
                ttwu_queue_remote(p, cpu);
                return;
        }
#endif

        raw_spin_lock(&rq->lock);
        ttwu_do_activate(rq, p, 0);
        raw_spin_unlock(&rq->lock);
}
/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
        unsigned long flags;
        int cpu, success = 0;

        smp_wmb();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;

        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);

        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;

#ifdef CONFIG_SMP
        /*
         * If the owning (remote) cpu is still in the middle of schedule() with
         * this task as prev, wait until it's done referencing the task.
         */
        while (p->on_cpu) {
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
                /*
                 * In case the architecture enables interrupts in
                 * context_switch(), we cannot busy wait, since that
                 * would lead to deadlocks when an interrupt hits and
                 * tries to wake up @prev. So bail and do a complete
                 * remote wakeup.
                 */
                if (ttwu_activate_remote(p, wake_flags))
                        goto stat;
#else
                cpu_relax();
#endif
        }
        /*
         * Pairs with the smp_wmb() in finish_lock_switch().
         */
        smp_rmb();

        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;

        if (p->sched_class->task_waking)
                p->sched_class->task_waking(p);

        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
        }
#endif /* CONFIG_SMP */

        ttwu_queue(p, cpu);
stat:
        ttwu_stat(p, cpu, wake_flags);
out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

        return success;
}
/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
        struct rq *rq = task_rq(p);

        BUG_ON(rq != this_rq());
        BUG_ON(p == current);
        lockdep_assert_held(&rq->lock);

        if (!raw_spin_trylock(&p->pi_lock)) {
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);
        }

        if (!(p->state & TASK_NORMAL))
                goto out;

        if (!p->on_rq)
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);

        ttwu_do_wakeup(rq, p, 0);
        ttwu_stat(p, smp_processor_id(), 0);
out:
        raw_spin_unlock(&p->pi_lock);
}
/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes. Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
        return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
        return try_to_wake_up(p, state, 0);
}
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
        p->on_rq                        = 0;

        p->se.on_rq                     = 0;
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
        INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

        INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}
/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p)
{
        unsigned long flags;
        int cpu = get_cpu();

        __sched_fork(p);
        /*
         * We mark the process as running here. This guarantees that
         * nobody will actually run it, and a signal or other external
         * event cannot wake it up and insert it on the runqueue either.
         */
        p->state = TASK_RUNNING;

        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
        p->prio = current->normal_prio;

        /*
         * Revert to default priority/policy on fork if requested.
         */
        if (unlikely(p->sched_reset_on_fork)) {
                if (task_has_rt_policy(p)) {
                        p->policy = SCHED_NORMAL;
                        p->static_prio = NICE_TO_PRIO(0);
                        p->rt_priority = 0;
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);

                p->prio = p->normal_prio = __normal_prio(p);
                set_load_weight(p);

                /*
                 * We don't need the reset flag anymore after the fork. It has
                 * fulfilled its duty:
                 */
                p->sched_reset_on_fork = 0;
        }

        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;

        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);

        /*
         * The child is not yet in the pid-hash so no cgroup attach races,
         * and the cgroup is pinned to this child due to cgroup_fork()
         * being run before sched_fork().
         *
         * Silence PROVE_RCU.
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        set_task_cpu(p, cpu);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
        p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT_COUNT
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
        plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif

        put_cpu();
}
/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
        /*
         * Fork balancing, do it here and not earlier because:
         *  - cpus_allowed can change in the fork path
         *  - any previously selected cpu might disappear through hotplug
         */
        set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
        p->on_rq = 1;
        trace_sched_wakeup_new(p, true);
        check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);
#endif
        task_rq_unlock(rq, p, &flags);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
        hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
        hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
        struct preempt_notifier *notifier;
        struct hlist_node *node;

        hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
                notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
{
        struct preempt_notifier *notifier;
        struct hlist_node *node;

        hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
                notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
{
        trace_sched_switch(prev, next);
        sched_info_switch(prev, next);
        perf_event_task_sched_out(prev, next);
        fire_sched_out_preempt_notifiers(prev, next);
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
}
/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        __releases(rq->lock)
{
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;

        rq->prev_mm = NULL;

        /*
         * A task struct has one reference for the use as "current".
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
         * The test for TASK_DEAD must occur while the runqueue locks are
         * still held, otherwise prev could be scheduled on another cpu, die
         * there before we look at prev->state, and then the reference would
         * be dropped twice.
         *              Manfred Spraul <manfred@colorfullife.com>
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        perf_event_task_sched_in(prev, current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
        finish_arch_post_lock_switch();

        fire_sched_in_preempt_notifiers(current);
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
                /*
                 * Remove function-return probe instances associated with this
                 * task and put them back on the free list.
                 */
                kprobe_flush_task(prev);
                put_task_struct(prev);
        }
}
#ifdef CONFIG_SMP

/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
        if (prev->sched_class->pre_schedule)
                prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
        if (rq->post_schedule) {
                unsigned long flags;

                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
                raw_spin_unlock_irqrestore(&rq->lock, flags);

                rq->post_schedule = 0;
        }
}

#else

static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}

#endif
/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
{
        struct rq *rq = this_rq();

        finish_task_switch(rq, prev);

        /*
         * FIXME: do we need to worry about rq being invalidated by the
         * task_switch?
         */
        post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
        /* In this case, finish_task_switch does not reenable preemption */
        preempt_enable();
#endif
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
        struct mm_struct *mm, *oldmm;

        prepare_task_switch(rq, prev, next);

        mm = next->mm;
        oldmm = prev->active_mm;
        /*
         * For paravirt, this is coupled with an exit in switch_to to
         * combine the page table reload and the switch backend into
         * one hypercall.
         */
        arch_start_context_switch(prev);

        if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);

        if (!prev->mm) {
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
        /*
         * The runqueue lock will be released by the next task (which is
         * an invalid locking op, but in the case of the scheduler it's an
         * obvious special-case), so we do an early lockdep release here:
         */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

        /* Here we just switch the register state and the stack. */
        rcu_switch(prev, next);
        switch_to(prev, next, prev);

        barrier();
        /*
         * this_rq must be evaluated again because prev may have moved
         * CPUs since it called schedule(), thus the 'rq' on its stack
         * frame will be invalid.
         */
        finish_task_switch(this_rq(), prev);
}
/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
        unsigned long i, sum = 0;

        for_each_online_cpu(i)
                sum += cpu_rq(i)->nr_running;

        return sum;
}

unsigned long nr_uninterruptible(void)
{
        unsigned long i, sum = 0;

        for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_uninterruptible;

        /*
         * Since we read the counters lockless, it might be slightly
         * inaccurate. Do not allow it to go below zero though:
         */
        if (unlikely((long)sum < 0))
                sum = 0;

        return sum;
}

unsigned long long nr_context_switches(void)
{
        int i;
        unsigned long long sum = 0;

        for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_switches;

        return sum;
}

unsigned long nr_iowait(void)
{
        unsigned long i, sum = 0;

        for_each_possible_cpu(i)
                sum += atomic_read(&cpu_rq(i)->nr_iowait);

        return sum;
}

unsigned long nr_iowait_cpu(int cpu)
{
        struct rq *this = cpu_rq(cpu);
        return atomic_read(&this->nr_iowait);
}

unsigned long this_cpu_load(void)
{
        struct rq *this = this_rq();
        return this->cpu_load[0];
}
/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *      nr_active = 0;
 *      for_each_possible_cpu(cpu)
 *              nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *      avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
 *
 * Due to a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with
 *    serious number of cpus, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So assuming nr_active := 0 when we start out -- true per definition, we
 *    can simply take per-cpu deltas and fold those into a global accumulate
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    cpu to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
 *    this would add another cross-cpu cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever cpu the task ran
 *    when it went into uninterruptible state and decrement on whatever cpu
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all cpus yields the correct result.
 *
 * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
 */
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:  pointer to dest load array
 * @offset: offset to add
 * @shift:  shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
        loads[0] = (avenrun[0] + offset) << shift;
        loads[1] = (avenrun[1] + offset) << shift;
        loads[2] = (avenrun[2] + offset) << shift;
}
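
/*
 * Usage sketch (illustrative): avenrun[] is fixed-point with FSHIFT
 * fractional bits (FIXED_1 == 1 << FSHIFT == 2048 for FSHIFT == 11).
 * A consumer like /proc/loadavg splits each value into integer and
 * fractional parts roughly like:
 *
 *      unsigned long avnrun[3];
 *
 *      get_avenrun(avnrun, FIXED_1/200, 0);    // round to 2 decimals
 *      printf("%lu.%02lu\n",
 *             avnrun[0] >> FSHIFT,                             // LOAD_INT()
 *             ((avnrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT);  // LOAD_FRAC()
 *
 * so a stored value of 2867 prints as 1.40 (2867/2048 ~= 1.40 once the
 * FIXED_1/200 rounding offset is added).
 */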
static long calc_load_fold_active(struct rq *this_rq)
{
        long nr_active, delta = 0;

        nr_active = this_rq->nr_running;
        nr_active += (long) this_rq->nr_uninterruptible;

        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
                this_rq->calc_load_active = nr_active;
        }

        return delta;
}

/*
 * a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}
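
/*
 * Worked example (illustrative): with FSHIFT == 11, FIXED_1 == 2048 and
 * the 1-minute decay factor EXP_1 == 1884 (~= 2048/e^(5s/60s)). Starting
 * from load == 0 with one task running (active == FIXED_1):
 *
 *      load = (0   * 1884 + 2048 * (2048 - 1884) + 1024) >> 11 = 164
 *      load = (164 * 1884 + 2048 * 164           + 1024) >> 11 = 315
 *
 * i.e. the average climbs 0.080, 0.154, ... towards 1.0, gaining about
 * 8% of the remaining distance every LOAD_FREQ (5s) sample. The
 * "+ 1UL << (FSHIFT - 1)" term just rounds the fixed-point division.
 */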
#ifdef CONFIG_NO_HZ
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above described distributed algorithm to compute the global
 * load-average relies on per-cpu sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global idle-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two idle-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *          0s            5s            10s           15s
 *            +10           +10           +10           +10
 *          |-|-----------|-|-----------|-|-----------|-|
 *     r:0 0 1           1 0 0           1 1 0           0
 *     w:0 1 1           0 0 1           1 0 0           0
 *
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ idle during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this cpu (effectively using the idle-delta for this cpu which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a cpu having been in NOHZ idle for multiple
 *    LOAD_FREQ intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
static atomic_long_t calc_load_idle[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
        int idx = calc_load_idx;

        /*
         * See calc_global_nohz(), if we observe the new index, we also
         * need to observe the new update time.
         */
        smp_rmb();

        /*
         * If the folding window started, make sure we start writing in the
         * next idle-delta.
         */
        if (!time_before(jiffies, calc_load_update))
                idx++;

        return idx & 1;
}

static inline int calc_load_read_idx(void)
{
        return calc_load_idx & 1;
}

void calc_load_enter_idle(void)
{
        struct rq *this_rq = this_rq();
        long delta;

        /*
         * We're going into NOHZ mode, if there's any pending delta, fold it
         * into the pending idle delta.
         */
        delta = calc_load_fold_active(this_rq);
        if (delta) {
                int idx = calc_load_write_idx();
                atomic_long_add(delta, &calc_load_idle[idx]);
        }
}

void calc_load_exit_idle(void)
{
        struct rq *this_rq = this_rq();

        /*
         * If we're still before the sample window, we're done.
         */
        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        /*
         * We woke inside or after the sample window, this means we're already
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
        this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
}

static long calc_load_fold_idle(void)
{
        int idx = calc_load_read_idx();
        long delta = 0;

        if (atomic_long_read(&calc_load_idle[idx]))
                delta = atomic_long_xchg(&calc_load_idle[idx], 0);

        return delta;
}
/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
        unsigned long result = 1UL << frac_bits;

        if (n) for (;;) {
                if (n & 1) {
                        result *= x;
                        result += 1UL << (frac_bits - 1);
                        result >>= frac_bits;
                }
                n >>= 1;
                if (!n)
                        break;
                x *= x;
                x += 1UL << (frac_bits - 1);
                x >>= frac_bits;
        }

        return result;
}
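
/*
 * Worked example (illustrative): square-and-multiply with frac_bits == 11.
 * To compute 0.92^5 take x = 1884 (~0.92 * 2048) and n = 5 = 0b101:
 *
 *      bit 0 (set):   result = (2048 * 1884 + 1024) >> 11  = 1884
 *                     x      = (1884 * 1884 + 1024) >> 11 ~= 1733   (x^2)
 *      bit 1 (clear): x      = (1733 * 1733 + 1024) >> 11 ~= 1466   (x^4)
 *      bit 2 (set):   result = (1884 * 1466 + 1024) >> 11 ~= 1349
 *
 * giving 1349/2048 ~= 0.659 ~= 0.92^5, in 3 iterations instead of 5
 * multiplies. The "+ 1UL << (frac_bits - 1)" terms round each step.
 */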
/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 * ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1)  [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0           1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
            unsigned long active, unsigned int n)
{
        return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_load_account_active(), but since an idle CPU folds its delta into
 * calc_load_idle per calc_load_enter_idle(), all we need to do is fold
 * in the pending idle delta if our idle period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
        long delta, active, n;

        if (!time_before(jiffies, calc_load_update + 10)) {
                /*
                 * Catch-up, fold however many we are behind still
                 */
                delta = jiffies - calc_load_update - 10;
                n = 1 + (delta / LOAD_FREQ);

                active = atomic_long_read(&calc_load_tasks);
                active = active > 0 ? active * FIXED_1 : 0;

                avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

                calc_load_update += n * LOAD_FREQ;
        }

        /*
         * Flip the idle index...
         *
         * Make sure we first write the new time then flip the index, so that
         * calc_load_write_idx() will see the new time when it reads the new
         * index, this avoids a double flip messing things up.
         */
        smp_wmb();
        calc_load_idx++;
}
#else /* !CONFIG_NO_HZ */

static inline long calc_load_fold_idle(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ */

/*
 * calc_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(unsigned long ticks)
{
        long active, delta;

        if (time_before(jiffies, calc_load_update + 10))
                return;

        /*
         * Fold the 'old' idle-delta to include all NO_HZ cpus.
         */
        delta = calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;

        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);

        calc_load_update += LOAD_FREQ;

        /*
         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
}
/*
 * Called from update_cpu_load() to periodically update this CPU's
 * active count.
 */
static void calc_load_account_active(struct rq *this_rq)
{
        long delta;

        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        delta = calc_load_fold_active(this_rq);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        this_rq->calc_load_update += LOAD_FREQ;
}

/*
 * End of global load-average stuff
 */
/*
 * The exact cpuload at various idx values, calculated at every tick would be
 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
 * on the nth tick when cpu may be busy, then we have:
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
 *
 * decay_load_missed() below does efficient calculation of
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * avoiding the 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
 *
 * The calculation is approximated on a 128 point scale.
 * degrade_zero_ticks is the number of ticks after which load at any
 * particular idx is approximated to be zero.
 * degrade_factor is a precomputed table, a row for each load idx.
 * Each column corresponds to the degradation factor for a power of two ticks,
 * based on the 128 point scale.
 * Example:
 * row 2, col 3 (=12) says that the degradation at load idx 2 after
 * 8 ticks is 12/128 (which is an approximation of the exact factor 3^8/4^8).
 *
 * With these power-of-2 load factors, we can degrade the load n times
 * by looking at 1 bits in n and doing as many mult/shifts instead of
 * the n mult/shifts needed by the exact degradation.
 */
#define DEGRADE_SHIFT           7
static const unsigned char
                degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char
                degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
                                        {0, 0, 0, 0, 0, 0, 0, 0},
                                        {64, 32, 8, 0, 0, 0, 0, 0},
                                        {96, 72, 40, 12, 1, 0, 0},
                                        {112, 98, 75, 43, 15, 1, 0},
                                        {120, 112, 98, 76, 45, 16, 2} };
/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * would be when CPU is idle and so we just decay the old load without
 * adding any new load.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
        int j = 0;

        if (!missed_updates)
                return load;

        if (missed_updates >= degrade_zero_ticks[idx])
                return 0;

        if (idx == 1)
                return load >> missed_updates;

        while (missed_updates) {
                if (missed_updates % 2)
                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;

                missed_updates >>= 1;
                j++;
        }
        return load;
}
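
/*
 * Worked example (illustrative): decaying a load of 1000 at idx 2 after
 * 10 missed ticks. 10 = 0b1010, so only the columns for 2 and 8 ticks fire:
 *
 *      bit 1 (2 ticks): load = (1000 * 72) >> 7 = 562   (factor 72/128)
 *      bit 3 (8 ticks): load = (562  * 12) >> 7 = 52    (factor 12/128)
 *
 * versus the exact (3/4)^10 * 1000 ~= 56 -- two multiply/shifts instead
 * of ten, at the cost of the 128-point approximation error.
 */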
/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
 * every tick. We fix it up based on jiffies.
 */
static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
                              unsigned long pending_updates)
{
        int i, scale;

        this_rq->nr_load_updates++;

        /* Update our load: */
        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                unsigned long old_load, new_load;

                /* scale is effectively 1 << i now, and >> i divides by scale */

                old_load = this_rq->cpu_load[i];
                old_load = decay_load_missed(old_load, pending_updates - 1, i);
                new_load = this_load;
                /*
                 * Round up the averaging division if load is increasing. This
                 * prevents us from getting stuck on 9 if the load is 10, for
                 * example.
                 */
                if (new_load > old_load)
                        new_load += scale - 1;

                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }

        sched_avg_update(this_rq);
}
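
/*
 * Worked example (illustrative): for idx i the update is
 *
 *      cpu_load[i] = (old * (2^i - 1) + new) >> i
 *
 * With i == 2, old == 0 and a constant new load of 10, the series runs
 * 0, 3, 5, 7, 8, 9, 10, closing a fraction of the gap each tick; the
 * "new_load += scale - 1" rounding is what lets it actually reach 10
 * instead of sticking at 9. Larger idx therefore reacts more slowly,
 * which is what the longer-horizon balancing heuristics want.
 */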
  2260. #ifdef CONFIG_NO_HZ
  2261. /*
  2262. * There is no sane way to deal with nohz on smp when using jiffies because the
  2263. * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
  2264. * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
  2265. *
  2266. * Therefore we cannot use the delta approach from the regular tick since that
  2267. * would seriously skew the load calculation. However we'll make do for those
  2268. * updates happening while idle (nohz_idle_balance) or coming out of idle
  2269. * (tick_nohz_idle_exit).
  2270. *
  2271. * This means we might still be one tick off for nohz periods.
  2272. */
  2273. /*
  2274. * Called from nohz_idle_balance() to update the load ratings before doing the
  2275. * idle balance.
  2276. */
  2277. void update_idle_cpu_load(struct rq *this_rq)
  2278. {
  2279. unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
  2280. unsigned long load = this_rq->load.weight;
  2281. unsigned long pending_updates;
  2282. /*
  2283. * bail if there's load or we're actually up-to-date.
  2284. */
  2285. if (load || curr_jiffies == this_rq->last_load_update_tick)
  2286. return;
  2287. pending_updates = curr_jiffies - this_rq->last_load_update_tick;
  2288. this_rq->last_load_update_tick = curr_jiffies;
  2289. __update_cpu_load(this_rq, load, pending_updates);
  2290. }
  2291. /*
  2292. * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
  2293. */
  2294. void update_cpu_load_nohz(void)
  2295. {
  2296. struct rq *this_rq = this_rq();
  2297. unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
  2298. unsigned long pending_updates;
  2299. if (curr_jiffies == this_rq->last_load_update_tick)
  2300. return;
  2301. raw_spin_lock(&this_rq->lock);
  2302. pending_updates = curr_jiffies - this_rq->last_load_update_tick;
  2303. if (pending_updates) {
  2304. this_rq->last_load_update_tick = curr_jiffies;
  2305. /*
2306. * We were idle, which means load 0; the current load might be
2307. * !0 due to remote wakeups and the like.
  2308. */
  2309. __update_cpu_load(this_rq, 0, pending_updates);
  2310. }
  2311. raw_spin_unlock(&this_rq->lock);
  2312. }
  2313. #endif /* CONFIG_NO_HZ */
  2314. /*
  2315. * Called from scheduler_tick()
  2316. */
  2317. static void update_cpu_load_active(struct rq *this_rq)
  2318. {
  2319. /*
  2320. * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
  2321. */
  2322. this_rq->last_load_update_tick = jiffies;
  2323. __update_cpu_load(this_rq, this_rq->load.weight, 1);
  2324. calc_load_account_active(this_rq);
  2325. }
  2326. #ifdef CONFIG_SMP
  2327. /*
  2328. * sched_exec - execve() is a valuable balancing opportunity, because at
  2329. * this point the task has the smallest effective memory and cache footprint.
  2330. */
  2331. void sched_exec(void)
  2332. {
  2333. struct task_struct *p = current;
  2334. unsigned long flags;
  2335. int dest_cpu;
  2336. raw_spin_lock_irqsave(&p->pi_lock, flags);
  2337. dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
  2338. if (dest_cpu == smp_processor_id())
  2339. goto unlock;
  2340. if (likely(cpu_active(dest_cpu))) {
  2341. struct migration_arg arg = { p, dest_cpu };
  2342. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  2343. stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
  2344. return;
  2345. }
  2346. unlock:
  2347. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  2348. }
  2349. #endif
  2350. DEFINE_PER_CPU(struct kernel_stat, kstat);
  2351. DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
  2352. EXPORT_PER_CPU_SYMBOL(kstat);
  2353. EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
  2354. /*
  2355. * Return any ns on the sched_clock that have not yet been accounted in
  2356. * @p in case that task is currently running.
  2357. *
  2358. * Called with task_rq_lock() held on @rq.
  2359. */
  2360. static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
  2361. {
  2362. u64 ns = 0;
  2363. if (task_current(rq, p)) {
  2364. update_rq_clock(rq);
  2365. ns = rq->clock_task - p->se.exec_start;
  2366. if ((s64)ns < 0)
  2367. ns = 0;
  2368. }
  2369. return ns;
  2370. }
  2371. unsigned long long task_delta_exec(struct task_struct *p)
  2372. {
  2373. unsigned long flags;
  2374. struct rq *rq;
  2375. u64 ns = 0;
  2376. rq = task_rq_lock(p, &flags);
  2377. ns = do_task_delta_exec(p, rq);
  2378. task_rq_unlock(rq, p, &flags);
  2379. return ns;
  2380. }
  2381. /*
  2382. * Return accounted runtime for the task.
  2383. * In case the task is currently running, return the runtime plus current's
2384. * pending runtime that has not been accounted yet.
  2385. */
  2386. unsigned long long task_sched_runtime(struct task_struct *p)
  2387. {
  2388. unsigned long flags;
  2389. struct rq *rq;
  2390. u64 ns = 0;
  2391. rq = task_rq_lock(p, &flags);
  2392. ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
  2393. task_rq_unlock(rq, p, &flags);
  2394. return ns;
  2395. }
  2396. #ifdef CONFIG_CGROUP_CPUACCT
  2397. struct cgroup_subsys cpuacct_subsys;
  2398. struct cpuacct root_cpuacct;
  2399. #endif
  2400. static inline void task_group_account_field(struct task_struct *p, int index,
  2401. u64 tmp)
  2402. {
  2403. #ifdef CONFIG_CGROUP_CPUACCT
  2404. struct kernel_cpustat *kcpustat;
  2405. struct cpuacct *ca;
  2406. #endif
  2407. /*
  2408. * Since all updates are sure to touch the root cgroup, we
  2409. * get ourselves ahead and touch it first. If the root cgroup
  2410. * is the only cgroup, then nothing else should be necessary.
  2411. *
  2412. */
  2413. __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
  2414. #ifdef CONFIG_CGROUP_CPUACCT
  2415. if (unlikely(!cpuacct_subsys.active))
  2416. return;
  2417. rcu_read_lock();
  2418. ca = task_ca(p);
  2419. while (ca && (ca != &root_cpuacct)) {
  2420. kcpustat = this_cpu_ptr(ca->cpustat);
  2421. kcpustat->cpustat[index] += tmp;
  2422. ca = parent_ca(ca);
  2423. }
  2424. rcu_read_unlock();
  2425. #endif
  2426. }
  2427. /*
  2428. * Account user cpu time to a process.
  2429. * @p: the process that the cpu time gets accounted to
  2430. * @cputime: the cpu time spent in user space since the last update
  2431. * @cputime_scaled: cputime scaled by cpu frequency
  2432. */
  2433. void account_user_time(struct task_struct *p, cputime_t cputime,
  2434. cputime_t cputime_scaled)
  2435. {
  2436. int index;
  2437. /* Add user time to process. */
  2438. p->utime += cputime;
  2439. p->utimescaled += cputime_scaled;
  2440. account_group_user_time(p, cputime);
  2441. index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
  2442. /* Add user time to cpustat. */
  2443. task_group_account_field(p, index, (__force u64) cputime);
  2444. /* Account for user time used */
  2445. acct_update_integrals(p);
  2446. }
  2447. /*
  2448. * Account guest cpu time to a process.
  2449. * @p: the process that the cpu time gets accounted to
  2450. * @cputime: the cpu time spent in virtual machine since the last update
  2451. * @cputime_scaled: cputime scaled by cpu frequency
  2452. */
  2453. static void account_guest_time(struct task_struct *p, cputime_t cputime,
  2454. cputime_t cputime_scaled)
  2455. {
  2456. u64 *cpustat = kcpustat_this_cpu->cpustat;
  2457. /* Add guest time to process. */
  2458. p->utime += cputime;
  2459. p->utimescaled += cputime_scaled;
  2460. account_group_user_time(p, cputime);
  2461. p->gtime += cputime;
  2462. /* Add guest time to cpustat. */
  2463. if (TASK_NICE(p) > 0) {
  2464. cpustat[CPUTIME_NICE] += (__force u64) cputime;
  2465. cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
  2466. } else {
  2467. cpustat[CPUTIME_USER] += (__force u64) cputime;
  2468. cpustat[CPUTIME_GUEST] += (__force u64) cputime;
  2469. }
  2470. }
  2471. /*
  2472. * Account system cpu time to a process and desired cpustat field
  2473. * @p: the process that the cpu time gets accounted to
  2474. * @cputime: the cpu time spent in kernel space since the last update
  2475. * @cputime_scaled: cputime scaled by cpu frequency
2476. * @index: index of the cpustat field that has to be updated
  2477. */
  2478. static inline
  2479. void __account_system_time(struct task_struct *p, cputime_t cputime,
  2480. cputime_t cputime_scaled, int index)
  2481. {
  2482. /* Add system time to process. */
  2483. p->stime += cputime;
  2484. p->stimescaled += cputime_scaled;
  2485. account_group_system_time(p, cputime);
  2486. /* Add system time to cpustat. */
  2487. task_group_account_field(p, index, (__force u64) cputime);
  2488. /* Account for system time used */
  2489. acct_update_integrals(p);
  2490. }
  2491. /*
  2492. * Account system cpu time to a process.
  2493. * @p: the process that the cpu time gets accounted to
  2494. * @hardirq_offset: the offset to subtract from hardirq_count()
  2495. * @cputime: the cpu time spent in kernel space since the last update
  2496. * @cputime_scaled: cputime scaled by cpu frequency
  2497. */
  2498. void account_system_time(struct task_struct *p, int hardirq_offset,
  2499. cputime_t cputime, cputime_t cputime_scaled)
  2500. {
  2501. int index;
  2502. if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  2503. account_guest_time(p, cputime, cputime_scaled);
  2504. return;
  2505. }
  2506. if (hardirq_count() - hardirq_offset)
  2507. index = CPUTIME_IRQ;
  2508. else if (in_serving_softirq())
  2509. index = CPUTIME_SOFTIRQ;
  2510. else
  2511. index = CPUTIME_SYSTEM;
  2512. __account_system_time(p, cputime, cputime_scaled, index);
  2513. }
  2514. /*
  2515. * Account for involuntary wait time.
  2516. * @cputime: the cpu time spent in involuntary wait
  2517. */
  2518. void account_steal_time(cputime_t cputime)
  2519. {
  2520. u64 *cpustat = kcpustat_this_cpu->cpustat;
  2521. cpustat[CPUTIME_STEAL] += (__force u64) cputime;
  2522. }
  2523. /*
  2524. * Account for idle time.
  2525. * @cputime: the cpu time spent in idle wait
  2526. */
  2527. void account_idle_time(cputime_t cputime)
  2528. {
  2529. u64 *cpustat = kcpustat_this_cpu->cpustat;
  2530. struct rq *rq = this_rq();
  2531. if (atomic_read(&rq->nr_iowait) > 0)
  2532. cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
  2533. else
  2534. cpustat[CPUTIME_IDLE] += (__force u64) cputime;
  2535. }
  2536. static __always_inline bool steal_account_process_tick(void)
  2537. {
  2538. #ifdef CONFIG_PARAVIRT
  2539. if (static_key_false(&paravirt_steal_enabled)) {
  2540. u64 steal, st = 0;
  2541. steal = paravirt_steal_clock(smp_processor_id());
  2542. steal -= this_rq()->prev_steal_time;
  2543. st = steal_ticks(steal);
  2544. this_rq()->prev_steal_time += st * TICK_NSEC;
  2545. account_steal_time(st);
  2546. return st;
  2547. }
  2548. #endif
  2549. return false;
  2550. }
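/*
 * Illustrative sketch (not part of this file): the paravirt steal
 * accounting above converts a monotonically increasing stolen-time clock
 * (nanoseconds) into whole ticks, carrying the sub-tick remainder forward
 * in prev_steal_time.  The names ex_steal_state and ex_account_steal are
 * made up, and TICK_NSEC is assumed to be 1,000,000 ns (HZ=1000) here.
 */
#include <stdio.h>

#define EX_TICK_NSEC 1000000ULL

struct ex_steal_state {
	unsigned long long prev_steal_time;	/* ns already accounted */
};

/* Returns the number of whole ticks newly accounted as stolen. */
static unsigned long long ex_account_steal(struct ex_steal_state *s,
					   unsigned long long steal_clock)
{
	unsigned long long delta = steal_clock - s->prev_steal_time;
	unsigned long long ticks = delta / EX_TICK_NSEC;

	s->prev_steal_time += ticks * EX_TICK_NSEC;	/* keep the remainder pending */
	return ticks;
}

int main(void)
{
	struct ex_steal_state s = { 0 };

	printf("%llu\n", ex_account_steal(&s, 2500000));	/* 2 ticks, 0.5 pending */
	printf("%llu\n", ex_account_steal(&s, 3100000));	/* 1 more tick */
	return 0;
}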
  2551. #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  2552. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  2553. /*
  2554. * Account a tick to a process and cpustat
  2555. * @p: the process that the cpu time gets accounted to
  2556. * @user_tick: is the tick from userspace
  2557. * @rq: the pointer to rq
  2558. *
  2559. * Tick demultiplexing follows the order
  2560. * - pending hardirq update
  2561. * - pending softirq update
  2562. * - user_time
  2563. * - idle_time
  2564. * - system time
  2565. * - check for guest_time
  2566. * - else account as system_time
  2567. *
2568. * The check for hardirq is done for both system and user time, as there is
2569. * no timer going off while we are on hardirq and hence we may never get an
2570. * opportunity to update it solely in system time.
2571. * p->stime and friends are only updated on system time and not on irq or
2572. * softirq time, as those no longer count in the task's exec_runtime.
  2573. */
  2574. static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  2575. struct rq *rq)
  2576. {
  2577. cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
  2578. u64 *cpustat = kcpustat_this_cpu->cpustat;
  2579. if (steal_account_process_tick())
  2580. return;
  2581. if (irqtime_account_hi_update()) {
  2582. cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
  2583. } else if (irqtime_account_si_update()) {
  2584. cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
  2585. } else if (this_cpu_ksoftirqd() == p) {
  2586. /*
2587. * ksoftirqd time does not get accounted in cpu_softirq_time.
  2588. * So, we have to handle it separately here.
  2589. * Also, p->stime needs to be updated for ksoftirqd.
  2590. */
  2591. __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
  2592. CPUTIME_SOFTIRQ);
  2593. } else if (user_tick) {
  2594. account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
  2595. } else if (p == rq->idle) {
  2596. account_idle_time(cputime_one_jiffy);
  2597. } else if (p->flags & PF_VCPU) { /* System time or guest time */
  2598. account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
  2599. } else {
  2600. __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
  2601. CPUTIME_SYSTEM);
  2602. }
  2603. }
  2604. static void irqtime_account_idle_ticks(int ticks)
  2605. {
  2606. int i;
  2607. struct rq *rq = this_rq();
  2608. for (i = 0; i < ticks; i++)
  2609. irqtime_account_process_tick(current, 0, rq);
  2610. }
  2611. #else /* CONFIG_IRQ_TIME_ACCOUNTING */
  2612. static void irqtime_account_idle_ticks(int ticks) {}
  2613. static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  2614. struct rq *rq) {}
  2615. #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
  2616. /*
  2617. * Account a single tick of cpu time.
  2618. * @p: the process that the cpu time gets accounted to
  2619. * @user_tick: indicates if the tick is a user or a system tick
  2620. */
  2621. void account_process_tick(struct task_struct *p, int user_tick)
  2622. {
  2623. cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
  2624. struct rq *rq = this_rq();
  2625. if (sched_clock_irqtime) {
  2626. irqtime_account_process_tick(p, user_tick, rq);
  2627. return;
  2628. }
  2629. if (steal_account_process_tick())
  2630. return;
  2631. if (user_tick)
  2632. account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
  2633. else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
  2634. account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
  2635. one_jiffy_scaled);
  2636. else
  2637. account_idle_time(cputime_one_jiffy);
  2638. }
  2639. /*
  2640. * Account multiple ticks of steal time.
2642. * @ticks: number of stolen ticks
  2643. */
  2644. void account_steal_ticks(unsigned long ticks)
  2645. {
  2646. account_steal_time(jiffies_to_cputime(ticks));
  2647. }
  2648. /*
  2649. * Account multiple ticks of idle time.
2650. * @ticks: number of ticks spent idle
  2651. */
  2652. void account_idle_ticks(unsigned long ticks)
  2653. {
  2654. if (sched_clock_irqtime) {
  2655. irqtime_account_idle_ticks(ticks);
  2656. return;
  2657. }
  2658. account_idle_time(jiffies_to_cputime(ticks));
  2659. }
  2660. #endif
  2661. /*
  2662. * Use precise platform statistics if available:
  2663. */
  2664. #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  2665. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2666. {
  2667. *ut = p->utime;
  2668. *st = p->stime;
  2669. }
  2670. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2671. {
  2672. struct task_cputime cputime;
  2673. thread_group_cputime(p, &cputime);
  2674. *ut = cputime.utime;
  2675. *st = cputime.stime;
  2676. }
  2677. #else
  2678. #ifndef nsecs_to_cputime
  2679. # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
  2680. #endif
  2681. static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
  2682. {
  2683. u64 temp = (__force u64) rtime;
  2684. temp *= (__force u64) utime;
  2685. if (sizeof(cputime_t) == 4)
  2686. temp = div_u64(temp, (__force u32) total);
  2687. else
  2688. temp = div64_u64(temp, (__force u64) total);
  2689. return (__force cputime_t) temp;
  2690. }
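/*
 * Illustrative sketch (not part of this file): scale_utime() above splits
 * the precise CFS runtime (rtime) between user and system time in the
 * tick-sampled utime:stime ratio.  The helper name ex_scale_utime is made
 * up for this sketch, and 64-bit overflow of the intermediate product is
 * ignored here.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t ex_scale_utime(uint64_t utime, uint64_t rtime, uint64_t total)
{
	return total ? (rtime * utime) / total : rtime;
}

int main(void)
{
	/* Tick-sampled: 30 user + 10 system ticks; precise runtime says 36. */
	uint64_t utime = 30, stime = 10, rtime = 36;
	uint64_t u = ex_scale_utime(utime, rtime, utime + stime);

	/* Prints "utime 27, stime 9": the same 3:1 split, rescaled to rtime. */
	printf("utime %llu, stime %llu\n",
	       (unsigned long long)u, (unsigned long long)(rtime - u));
	return 0;
}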
  2691. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2692. {
  2693. cputime_t rtime, utime = p->utime, total = utime + p->stime;
  2694. /*
  2695. * Use CFS's precise accounting:
  2696. */
  2697. rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
  2698. if (total)
  2699. utime = scale_utime(utime, rtime, total);
  2700. else
  2701. utime = rtime;
  2702. /*
  2703. * Compare with previous values, to keep monotonicity:
  2704. */
  2705. p->prev_utime = max(p->prev_utime, utime);
  2706. p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
  2707. *ut = p->prev_utime;
  2708. *st = p->prev_stime;
  2709. }
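/*
 * Illustrative sketch (not part of this file): the prev_utime/prev_stime
 * max() clamping above keeps the values handed to userspace monotonic
 * even if a later rescaling would make one of them go backwards.  The
 * names ex_prev and ex_report are made up for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

struct ex_prev { uint64_t utime, stime; };

static void ex_report(struct ex_prev *prev, uint64_t utime, uint64_t rtime,
		      uint64_t *ut, uint64_t *st)
{
	if (utime > prev->utime)
		prev->utime = utime;			/* max() */
	if (rtime - prev->utime > prev->stime)
		prev->stime = rtime - prev->utime;	/* max() */
	*ut = prev->utime;
	*st = prev->stime;
}

int main(void)
{
	struct ex_prev prev = { 0, 0 };
	uint64_t ut, st;

	ex_report(&prev, 27, 36, &ut, &st);	/* reports 27/9 */
	ex_report(&prev, 25, 40, &ut, &st);	/* ratio shifted: never drops below 27/9 */
	printf("ut %llu, st %llu\n", (unsigned long long)ut, (unsigned long long)st);
	return 0;
}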
  2710. /*
  2711. * Must be called with siglock held.
  2712. */
  2713. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2714. {
  2715. struct signal_struct *sig = p->signal;
  2716. struct task_cputime cputime;
  2717. cputime_t rtime, utime, total;
  2718. thread_group_cputime(p, &cputime);
  2719. total = cputime.utime + cputime.stime;
  2720. rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
  2721. if (total)
  2722. utime = scale_utime(cputime.utime, rtime, total);
  2723. else
  2724. utime = rtime;
  2725. sig->prev_utime = max(sig->prev_utime, utime);
  2726. sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
  2727. *ut = sig->prev_utime;
  2728. *st = sig->prev_stime;
  2729. }
  2730. #endif
  2731. /*
  2732. * This function gets called by the timer code, with HZ frequency.
  2733. * We call it with interrupts disabled.
  2734. */
  2735. void scheduler_tick(void)
  2736. {
  2737. int cpu = smp_processor_id();
  2738. struct rq *rq = cpu_rq(cpu);
  2739. struct task_struct *curr = rq->curr;
  2740. sched_clock_tick();
  2741. raw_spin_lock(&rq->lock);
  2742. update_rq_clock(rq);
  2743. update_cpu_load_active(rq);
  2744. curr->sched_class->task_tick(rq, curr, 0);
  2745. raw_spin_unlock(&rq->lock);
  2746. perf_event_task_tick();
  2747. #ifdef CONFIG_SMP
  2748. rq->idle_balance = idle_cpu(cpu);
  2749. trigger_load_balance(rq, cpu);
  2750. #endif
  2751. }
  2752. notrace unsigned long get_parent_ip(unsigned long addr)
  2753. {
  2754. if (in_lock_functions(addr)) {
  2755. addr = CALLER_ADDR2;
  2756. if (in_lock_functions(addr))
  2757. addr = CALLER_ADDR3;
  2758. }
  2759. return addr;
  2760. }
  2761. #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
  2762. defined(CONFIG_PREEMPT_TRACER))
  2763. void __kprobes add_preempt_count(int val)
  2764. {
  2765. #ifdef CONFIG_DEBUG_PREEMPT
  2766. /*
  2767. * Underflow?
  2768. */
  2769. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  2770. return;
  2771. #endif
  2772. preempt_count() += val;
  2773. #ifdef CONFIG_DEBUG_PREEMPT
  2774. /*
  2775. * Spinlock count overflowing soon?
  2776. */
  2777. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
  2778. PREEMPT_MASK - 10);
  2779. #endif
  2780. if (preempt_count() == val)
  2781. trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  2782. }
  2783. EXPORT_SYMBOL(add_preempt_count);
  2784. void __kprobes sub_preempt_count(int val)
  2785. {
  2786. #ifdef CONFIG_DEBUG_PREEMPT
  2787. /*
  2788. * Underflow?
  2789. */
  2790. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  2791. return;
  2792. /*
  2793. * Is the spinlock portion underflowing?
  2794. */
  2795. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  2796. !(preempt_count() & PREEMPT_MASK)))
  2797. return;
  2798. #endif
  2799. if (preempt_count() == val)
  2800. trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  2801. preempt_count() -= val;
  2802. }
  2803. EXPORT_SYMBOL(sub_preempt_count);
  2804. #endif
  2805. /*
  2806. * Print scheduling while atomic bug:
  2807. */
  2808. static noinline void __schedule_bug(struct task_struct *prev)
  2809. {
  2810. if (oops_in_progress)
  2811. return;
  2812. printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
  2813. prev->comm, prev->pid, preempt_count());
  2814. debug_show_held_locks(prev);
  2815. print_modules();
  2816. if (irqs_disabled())
  2817. print_irqtrace_events(prev);
  2818. dump_stack();
  2819. add_taint(TAINT_WARN);
  2820. }
  2821. /*
  2822. * Various schedule()-time debugging checks and statistics:
  2823. */
  2824. static inline void schedule_debug(struct task_struct *prev)
  2825. {
  2826. /*
  2827. * Test if we are atomic. Since do_exit() needs to call into
  2828. * schedule() atomically, we ignore that path for now.
  2829. * Otherwise, whine if we are scheduling when we should not be.
  2830. */
  2831. if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
  2832. __schedule_bug(prev);
  2833. rcu_sleep_check();
  2834. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  2835. schedstat_inc(this_rq(), sched_count);
  2836. }
  2837. static void put_prev_task(struct rq *rq, struct task_struct *prev)
  2838. {
  2839. if (prev->on_rq || rq->skip_clock_update < 0)
  2840. update_rq_clock(rq);
  2841. prev->sched_class->put_prev_task(rq, prev);
  2842. }
  2843. /*
  2844. * Pick up the highest-prio task:
  2845. */
  2846. static inline struct task_struct *
  2847. pick_next_task(struct rq *rq)
  2848. {
  2849. const struct sched_class *class;
  2850. struct task_struct *p;
  2851. /*
  2852. * Optimization: we know that if all tasks are in
  2853. * the fair class we can call that function directly:
  2854. */
  2855. if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
  2856. p = fair_sched_class.pick_next_task(rq);
  2857. if (likely(p))
  2858. return p;
  2859. }
  2860. for_each_class(class) {
  2861. p = class->pick_next_task(rq);
  2862. if (p)
  2863. return p;
  2864. }
  2865. BUG(); /* the idle class will always have a runnable task */
  2866. }
  2867. /*
  2868. * __schedule() is the main scheduler function.
  2869. */
  2870. static void __sched __schedule(void)
  2871. {
  2872. struct task_struct *prev, *next;
  2873. unsigned long *switch_count;
  2874. struct rq *rq;
  2875. int cpu;
  2876. need_resched:
  2877. preempt_disable();
  2878. cpu = smp_processor_id();
  2879. rq = cpu_rq(cpu);
  2880. rcu_note_context_switch(cpu);
  2881. prev = rq->curr;
  2882. schedule_debug(prev);
  2883. if (sched_feat(HRTICK))
  2884. hrtick_clear(rq);
  2885. raw_spin_lock_irq(&rq->lock);
  2886. switch_count = &prev->nivcsw;
  2887. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  2888. if (unlikely(signal_pending_state(prev->state, prev))) {
  2889. prev->state = TASK_RUNNING;
  2890. } else {
  2891. deactivate_task(rq, prev, DEQUEUE_SLEEP);
  2892. prev->on_rq = 0;
  2893. /*
  2894. * If a worker went to sleep, notify and ask workqueue
  2895. * whether it wants to wake up a task to maintain
  2896. * concurrency.
  2897. */
  2898. if (prev->flags & PF_WQ_WORKER) {
  2899. struct task_struct *to_wakeup;
  2900. to_wakeup = wq_worker_sleeping(prev, cpu);
  2901. if (to_wakeup)
  2902. try_to_wake_up_local(to_wakeup);
  2903. }
  2904. }
  2905. switch_count = &prev->nvcsw;
  2906. }
  2907. pre_schedule(rq, prev);
  2908. if (unlikely(!rq->nr_running))
  2909. idle_balance(cpu, rq);
  2910. put_prev_task(rq, prev);
  2911. next = pick_next_task(rq);
  2912. clear_tsk_need_resched(prev);
  2913. rq->skip_clock_update = 0;
  2914. if (likely(prev != next)) {
  2915. rq->nr_switches++;
  2916. rq->curr = next;
  2917. ++*switch_count;
  2918. context_switch(rq, prev, next); /* unlocks the rq */
  2919. /*
2920. * The context switch has flipped the stack from under us
  2921. * and restored the local variables which were saved when
  2922. * this task called schedule() in the past. prev == current
2923. * is still correct, but we may have been moved to another cpu/rq.
  2924. */
  2925. cpu = smp_processor_id();
  2926. rq = cpu_rq(cpu);
  2927. } else
  2928. raw_spin_unlock_irq(&rq->lock);
  2929. post_schedule(rq);
  2930. sched_preempt_enable_no_resched();
  2931. if (need_resched())
  2932. goto need_resched;
  2933. }
  2934. static inline void sched_submit_work(struct task_struct *tsk)
  2935. {
  2936. if (!tsk->state || tsk_is_pi_blocked(tsk))
  2937. return;
  2938. /*
  2939. * If we are going to sleep and we have plugged IO queued,
  2940. * make sure to submit it to avoid deadlocks.
  2941. */
  2942. if (blk_needs_flush_plug(tsk))
  2943. blk_schedule_flush_plug(tsk);
  2944. }
  2945. asmlinkage void __sched schedule(void)
  2946. {
  2947. struct task_struct *tsk = current;
  2948. sched_submit_work(tsk);
  2949. __schedule();
  2950. }
  2951. EXPORT_SYMBOL(schedule);
  2952. /**
  2953. * schedule_preempt_disabled - called with preemption disabled
  2954. *
  2955. * Returns with preemption disabled. Note: preempt_count must be 1
  2956. */
  2957. void __sched schedule_preempt_disabled(void)
  2958. {
  2959. sched_preempt_enable_no_resched();
  2960. schedule();
  2961. preempt_disable();
  2962. }
  2963. #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  2964. static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
  2965. {
  2966. if (lock->owner != owner)
  2967. return false;
  2968. /*
2969. * Ensure we emit the owner->on_cpu dereference _after_ checking that
2970. * lock->owner still matches owner. If that fails, owner might point
2971. * to free()d memory; if it still matches, the rcu_read_lock()
2972. * ensures the memory stays valid.
  2973. */
  2974. barrier();
  2975. return owner->on_cpu;
  2976. }
  2977. /*
  2978. * Look out! "owner" is an entirely speculative pointer
  2979. * access and not reliable.
  2980. */
  2981. int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  2982. {
  2983. if (!sched_feat(OWNER_SPIN))
  2984. return 0;
  2985. rcu_read_lock();
  2986. while (owner_running(lock, owner)) {
  2987. if (need_resched())
  2988. break;
  2989. arch_mutex_cpu_relax();
  2990. }
  2991. rcu_read_unlock();
  2992. /*
2993. * We break out of the loop above on need_resched() and when the
  2994. * owner changed, which is a sign for heavy contention. Return
  2995. * success only when lock->owner is NULL.
  2996. */
  2997. return lock->owner == NULL;
  2998. }
  2999. #endif
  3000. #ifdef CONFIG_PREEMPT
  3001. /*
3002. * This is the entry point to schedule() for in-kernel preemption
3003. * off the back of preempt_enable(). Kernel preemption off a return
3004. * from interrupt is handled by preempt_schedule_irq() below.
  3005. */
  3006. asmlinkage void __sched notrace preempt_schedule(void)
  3007. {
  3008. struct thread_info *ti = current_thread_info();
  3009. /*
  3010. * If there is a non-zero preempt_count or interrupts are disabled,
  3011. * we do not want to preempt the current task. Just return..
  3012. */
  3013. if (likely(ti->preempt_count || irqs_disabled()))
  3014. return;
  3015. do {
  3016. add_preempt_count_notrace(PREEMPT_ACTIVE);
  3017. __schedule();
  3018. sub_preempt_count_notrace(PREEMPT_ACTIVE);
  3019. /*
  3020. * Check again in case we missed a preemption opportunity
  3021. * between schedule and now.
  3022. */
  3023. barrier();
  3024. } while (need_resched());
  3025. }
  3026. EXPORT_SYMBOL(preempt_schedule);
  3027. /*
  3028. * this is the entry point to schedule() from kernel preemption
  3029. * off of irq context.
3030. * Note that this is called and returns with irqs disabled. This
3031. * protects us against recursive calls from irq context.
  3032. */
  3033. asmlinkage void __sched preempt_schedule_irq(void)
  3034. {
  3035. struct thread_info *ti = current_thread_info();
  3036. /* Catch callers which need to be fixed */
  3037. BUG_ON(ti->preempt_count || !irqs_disabled());
  3038. rcu_user_exit();
  3039. do {
  3040. add_preempt_count(PREEMPT_ACTIVE);
  3041. local_irq_enable();
  3042. __schedule();
  3043. local_irq_disable();
  3044. sub_preempt_count(PREEMPT_ACTIVE);
  3045. /*
  3046. * Check again in case we missed a preemption opportunity
  3047. * between schedule and now.
  3048. */
  3049. barrier();
  3050. } while (need_resched());
  3051. }
  3052. #endif /* CONFIG_PREEMPT */
  3053. int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
  3054. void *key)
  3055. {
  3056. return try_to_wake_up(curr->private, mode, wake_flags);
  3057. }
  3058. EXPORT_SYMBOL(default_wake_function);
  3059. /*
  3060. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  3061. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3062. * number) then we wake all the non-exclusive tasks and up to nr_exclusive exclusive tasks.
  3063. *
  3064. * There are circumstances in which we can try to wake a task which has already
  3065. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  3066. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  3067. */
  3068. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  3069. int nr_exclusive, int wake_flags, void *key)
  3070. {
  3071. wait_queue_t *curr, *next;
  3072. list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
  3073. unsigned flags = curr->flags;
  3074. if (curr->func(curr, mode, wake_flags, key) &&
  3075. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  3076. break;
  3077. }
  3078. }
  3079. /**
  3080. * __wake_up - wake up threads blocked on a waitqueue.
  3081. * @q: the waitqueue
  3082. * @mode: which threads
  3083. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3084. * @key: is directly passed to the wakeup function
  3085. *
  3086. * It may be assumed that this function implies a write memory barrier before
  3087. * changing the task state if and only if any tasks are woken up.
  3088. */
  3089. void __wake_up(wait_queue_head_t *q, unsigned int mode,
  3090. int nr_exclusive, void *key)
  3091. {
  3092. unsigned long flags;
  3093. spin_lock_irqsave(&q->lock, flags);
  3094. __wake_up_common(q, mode, nr_exclusive, 0, key);
  3095. spin_unlock_irqrestore(&q->lock, flags);
  3096. }
  3097. EXPORT_SYMBOL(__wake_up);
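/*
 * Illustrative sketch (not part of this file): typical waitqueue usage
 * that ends up in __wake_up() above.  A consumer sleeps in
 * wait_event_interruptible() until a producer sets the condition and
 * calls wake_up().  The names ex_wq, ex_ready, ex_producer and
 * ex_consumer are made up for this sketch.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
static int ex_ready;

static void ex_producer(void)
{
	ex_ready = 1;
	wake_up(&ex_wq);	/* -> __wake_up(&ex_wq, TASK_NORMAL, 1, NULL) */
}

static int ex_consumer(void)
{
	/* Returns 0 once ex_ready is set, or -ERESTARTSYS on a signal. */
	return wait_event_interruptible(ex_wq, ex_ready);
}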
  3098. /*
  3099. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  3100. */
  3101. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
  3102. {
  3103. __wake_up_common(q, mode, nr, 0, NULL);
  3104. }
  3105. EXPORT_SYMBOL_GPL(__wake_up_locked);
  3106. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  3107. {
  3108. __wake_up_common(q, mode, 1, 0, key);
  3109. }
  3110. EXPORT_SYMBOL_GPL(__wake_up_locked_key);
  3111. /**
  3112. * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  3113. * @q: the waitqueue
  3114. * @mode: which threads
  3115. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3116. * @key: opaque value to be passed to wakeup targets
  3117. *
3118. * The sync wakeup differs in that the waker knows that it will schedule
  3119. * away soon, so while the target thread will be woken up, it will not
  3120. * be migrated to another CPU - ie. the two threads are 'synchronized'
  3121. * with each other. This can prevent needless bouncing between CPUs.
  3122. *
  3123. * On UP it can prevent extra preemption.
  3124. *
  3125. * It may be assumed that this function implies a write memory barrier before
  3126. * changing the task state if and only if any tasks are woken up.
  3127. */
  3128. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
  3129. int nr_exclusive, void *key)
  3130. {
  3131. unsigned long flags;
  3132. int wake_flags = WF_SYNC;
  3133. if (unlikely(!q))
  3134. return;
  3135. if (unlikely(!nr_exclusive))
  3136. wake_flags = 0;
  3137. spin_lock_irqsave(&q->lock, flags);
  3138. __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
  3139. spin_unlock_irqrestore(&q->lock, flags);
  3140. }
  3141. EXPORT_SYMBOL_GPL(__wake_up_sync_key);
  3142. /*
  3143. * __wake_up_sync - see __wake_up_sync_key()
  3144. */
  3145. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  3146. {
  3147. __wake_up_sync_key(q, mode, nr_exclusive, NULL);
  3148. }
  3149. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  3150. /**
  3151. * complete: - signals a single thread waiting on this completion
  3152. * @x: holds the state of this particular completion
  3153. *
  3154. * This will wake up a single thread waiting on this completion. Threads will be
  3155. * awakened in the same order in which they were queued.
  3156. *
  3157. * See also complete_all(), wait_for_completion() and related routines.
  3158. *
  3159. * It may be assumed that this function implies a write memory barrier before
  3160. * changing the task state if and only if any tasks are woken up.
  3161. */
  3162. void complete(struct completion *x)
  3163. {
  3164. unsigned long flags;
  3165. spin_lock_irqsave(&x->wait.lock, flags);
  3166. x->done++;
  3167. __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
  3168. spin_unlock_irqrestore(&x->wait.lock, flags);
  3169. }
  3170. EXPORT_SYMBOL(complete);
  3171. /**
  3172. * complete_all: - signals all threads waiting on this completion
  3173. * @x: holds the state of this particular completion
  3174. *
  3175. * This will wake up all threads waiting on this particular completion event.
  3176. *
  3177. * It may be assumed that this function implies a write memory barrier before
  3178. * changing the task state if and only if any tasks are woken up.
  3179. */
  3180. void complete_all(struct completion *x)
  3181. {
  3182. unsigned long flags;
  3183. spin_lock_irqsave(&x->wait.lock, flags);
  3184. x->done += UINT_MAX/2;
  3185. __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
  3186. spin_unlock_irqrestore(&x->wait.lock, flags);
  3187. }
  3188. EXPORT_SYMBOL(complete_all);
  3189. static inline long __sched
  3190. do_wait_for_common(struct completion *x, long timeout, int state)
  3191. {
  3192. if (!x->done) {
  3193. DECLARE_WAITQUEUE(wait, current);
  3194. __add_wait_queue_tail_exclusive(&x->wait, &wait);
  3195. do {
  3196. if (signal_pending_state(state, current)) {
  3197. timeout = -ERESTARTSYS;
  3198. break;
  3199. }
  3200. __set_current_state(state);
  3201. spin_unlock_irq(&x->wait.lock);
  3202. timeout = schedule_timeout(timeout);
  3203. spin_lock_irq(&x->wait.lock);
  3204. } while (!x->done && timeout);
  3205. __remove_wait_queue(&x->wait, &wait);
  3206. if (!x->done)
  3207. return timeout;
  3208. }
  3209. x->done--;
  3210. return timeout ?: 1;
  3211. }
  3212. static long __sched
  3213. wait_for_common(struct completion *x, long timeout, int state)
  3214. {
  3215. might_sleep();
  3216. spin_lock_irq(&x->wait.lock);
  3217. timeout = do_wait_for_common(x, timeout, state);
  3218. spin_unlock_irq(&x->wait.lock);
  3219. return timeout;
  3220. }
  3221. /**
  3222. * wait_for_completion: - waits for completion of a task
  3223. * @x: holds the state of this particular completion
  3224. *
  3225. * This waits to be signaled for completion of a specific task. It is NOT
  3226. * interruptible and there is no timeout.
  3227. *
  3228. * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
  3229. * and interrupt capability. Also see complete().
  3230. */
  3231. void __sched wait_for_completion(struct completion *x)
  3232. {
  3233. wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
  3234. }
  3235. EXPORT_SYMBOL(wait_for_completion);
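/*
 * Illustrative sketch (not part of this file): the usual pairing of the
 * completion API above -- one context blocks in wait_for_completion(),
 * another signals with complete().  The names ex_done, ex_waiter and
 * ex_signaler are made up for this sketch.
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(ex_done);

static void ex_waiter(void)
{
	/* Blocks uninterruptibly until ex_signaler() runs complete(). */
	wait_for_completion(&ex_done);
}

static void ex_signaler(void)
{
	complete(&ex_done);	/* wakes one waiter; x->done counts extra signals */
}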
  3236. /**
  3237. * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  3238. * @x: holds the state of this particular completion
  3239. * @timeout: timeout value in jiffies
  3240. *
  3241. * This waits for either a completion of a specific task to be signaled or for a
  3242. * specified timeout to expire. The timeout is in jiffies. It is not
  3243. * interruptible.
  3244. *
  3245. * The return value is 0 if timed out, and positive (at least 1, or number of
  3246. * jiffies left till timeout) if completed.
  3247. */
  3248. unsigned long __sched
  3249. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  3250. {
  3251. return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
  3252. }
  3253. EXPORT_SYMBOL(wait_for_completion_timeout);
  3254. /**
  3255. * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  3256. * @x: holds the state of this particular completion
  3257. *
  3258. * This waits for completion of a specific task to be signaled. It is
  3259. * interruptible.
  3260. *
  3261. * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  3262. */
  3263. int __sched wait_for_completion_interruptible(struct completion *x)
  3264. {
  3265. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
  3266. if (t == -ERESTARTSYS)
  3267. return t;
  3268. return 0;
  3269. }
  3270. EXPORT_SYMBOL(wait_for_completion_interruptible);
  3271. /**
  3272. * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
  3273. * @x: holds the state of this particular completion
  3274. * @timeout: timeout value in jiffies
  3275. *
  3276. * This waits for either a completion of a specific task to be signaled or for a
  3277. * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  3278. *
  3279. * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
  3280. * positive (at least 1, or number of jiffies left till timeout) if completed.
  3281. */
  3282. long __sched
  3283. wait_for_completion_interruptible_timeout(struct completion *x,
  3284. unsigned long timeout)
  3285. {
  3286. return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
  3287. }
  3288. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  3289. /**
  3290. * wait_for_completion_killable: - waits for completion of a task (killable)
  3291. * @x: holds the state of this particular completion
  3292. *
  3293. * This waits to be signaled for completion of a specific task. It can be
  3294. * interrupted by a kill signal.
  3295. *
  3296. * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  3297. */
  3298. int __sched wait_for_completion_killable(struct completion *x)
  3299. {
  3300. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
  3301. if (t == -ERESTARTSYS)
  3302. return t;
  3303. return 0;
  3304. }
  3305. EXPORT_SYMBOL(wait_for_completion_killable);
  3306. /**
  3307. * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
  3308. * @x: holds the state of this particular completion
  3309. * @timeout: timeout value in jiffies
  3310. *
  3311. * This waits for either a completion of a specific task to be
  3312. * signaled or for a specified timeout to expire. It can be
  3313. * interrupted by a kill signal. The timeout is in jiffies.
  3314. *
  3315. * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
  3316. * positive (at least 1, or number of jiffies left till timeout) if completed.
  3317. */
  3318. long __sched
  3319. wait_for_completion_killable_timeout(struct completion *x,
  3320. unsigned long timeout)
  3321. {
  3322. return wait_for_common(x, timeout, TASK_KILLABLE);
  3323. }
  3324. EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  3325. /**
  3326. * try_wait_for_completion - try to decrement a completion without blocking
  3327. * @x: completion structure
  3328. *
  3329. * Returns: 0 if a decrement cannot be done without blocking
  3330. * 1 if a decrement succeeded.
  3331. *
  3332. * If a completion is being used as a counting completion,
  3333. * attempt to decrement the counter without blocking. This
  3334. * enables us to avoid waiting if the resource the completion
  3335. * is protecting is not available.
  3336. */
  3337. bool try_wait_for_completion(struct completion *x)
  3338. {
  3339. unsigned long flags;
  3340. int ret = 1;
  3341. spin_lock_irqsave(&x->wait.lock, flags);
  3342. if (!x->done)
  3343. ret = 0;
  3344. else
  3345. x->done--;
  3346. spin_unlock_irqrestore(&x->wait.lock, flags);
  3347. return ret;
  3348. }
  3349. EXPORT_SYMBOL(try_wait_for_completion);
  3350. /**
  3351. * completion_done - Test to see if a completion has any waiters
  3352. * @x: completion structure
  3353. *
  3354. * Returns: 0 if there are waiters (wait_for_completion() in progress)
  3355. * 1 if there are no waiters.
  3356. *
  3357. */
  3358. bool completion_done(struct completion *x)
  3359. {
  3360. unsigned long flags;
  3361. int ret = 1;
  3362. spin_lock_irqsave(&x->wait.lock, flags);
  3363. if (!x->done)
  3364. ret = 0;
  3365. spin_unlock_irqrestore(&x->wait.lock, flags);
  3366. return ret;
  3367. }
  3368. EXPORT_SYMBOL(completion_done);
  3369. static long __sched
  3370. sleep_on_common(wait_queue_head_t *q, int state, long timeout)
  3371. {
  3372. unsigned long flags;
  3373. wait_queue_t wait;
  3374. init_waitqueue_entry(&wait, current);
  3375. __set_current_state(state);
  3376. spin_lock_irqsave(&q->lock, flags);
  3377. __add_wait_queue(q, &wait);
  3378. spin_unlock(&q->lock);
  3379. timeout = schedule_timeout(timeout);
  3380. spin_lock_irq(&q->lock);
  3381. __remove_wait_queue(q, &wait);
  3382. spin_unlock_irqrestore(&q->lock, flags);
  3383. return timeout;
  3384. }
  3385. void __sched interruptible_sleep_on(wait_queue_head_t *q)
  3386. {
  3387. sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  3388. }
  3389. EXPORT_SYMBOL(interruptible_sleep_on);
  3390. long __sched
  3391. interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3392. {
  3393. return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
  3394. }
  3395. EXPORT_SYMBOL(interruptible_sleep_on_timeout);
  3396. void __sched sleep_on(wait_queue_head_t *q)
  3397. {
  3398. sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  3399. }
  3400. EXPORT_SYMBOL(sleep_on);
  3401. long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3402. {
  3403. return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
  3404. }
  3405. EXPORT_SYMBOL(sleep_on_timeout);
  3406. #ifdef CONFIG_RT_MUTEXES
  3407. /*
  3408. * rt_mutex_setprio - set the current priority of a task
  3409. * @p: task
  3410. * @prio: prio value (kernel-internal form)
  3411. *
  3412. * This function changes the 'effective' priority of a task. It does
  3413. * not touch ->normal_prio like __setscheduler().
  3414. *
  3415. * Used by the rt_mutex code to implement priority inheritance logic.
  3416. */
  3417. void rt_mutex_setprio(struct task_struct *p, int prio)
  3418. {
  3419. int oldprio, on_rq, running;
  3420. struct rq *rq;
  3421. const struct sched_class *prev_class;
  3422. BUG_ON(prio < 0 || prio > MAX_PRIO);
  3423. rq = __task_rq_lock(p);
  3424. /*
3425. * Idle task boosting is a no-no in general. There is one
3426. * exception, when PREEMPT_RT and NOHZ are active:
  3427. *
  3428. * The idle task calls get_next_timer_interrupt() and holds
  3429. * the timer wheel base->lock on the CPU and another CPU wants
  3430. * to access the timer (probably to cancel it). We can safely
  3431. * ignore the boosting request, as the idle CPU runs this code
  3432. * with interrupts disabled and will complete the lock
  3433. * protected section without being interrupted. So there is no
  3434. * real need to boost.
  3435. */
  3436. if (unlikely(p == rq->idle)) {
  3437. WARN_ON(p != rq->curr);
  3438. WARN_ON(p->pi_blocked_on);
  3439. goto out_unlock;
  3440. }
  3441. trace_sched_pi_setprio(p, prio);
  3442. oldprio = p->prio;
  3443. prev_class = p->sched_class;
  3444. on_rq = p->on_rq;
  3445. running = task_current(rq, p);
  3446. if (on_rq)
  3447. dequeue_task(rq, p, 0);
  3448. if (running)
  3449. p->sched_class->put_prev_task(rq, p);
  3450. if (rt_prio(prio))
  3451. p->sched_class = &rt_sched_class;
  3452. else
  3453. p->sched_class = &fair_sched_class;
  3454. p->prio = prio;
  3455. if (running)
  3456. p->sched_class->set_curr_task(rq);
  3457. if (on_rq)
  3458. enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
  3459. check_class_changed(rq, p, prev_class, oldprio);
  3460. out_unlock:
  3461. __task_rq_unlock(rq);
  3462. }
  3463. #endif
  3464. void set_user_nice(struct task_struct *p, long nice)
  3465. {
  3466. int old_prio, delta, on_rq;
  3467. unsigned long flags;
  3468. struct rq *rq;
  3469. if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  3470. return;
  3471. /*
  3472. * We have to be careful, if called from sys_setpriority(),
  3473. * the task might be in the middle of scheduling on another CPU.
  3474. */
  3475. rq = task_rq_lock(p, &flags);
  3476. /*
  3477. * The RT priorities are set via sched_setscheduler(), but we still
3478. * allow the 'normal' nice value to be set - but as expected
3479. * it won't have any effect on scheduling as long as the task is
3480. * SCHED_FIFO/SCHED_RR:
  3481. */
  3482. if (task_has_rt_policy(p)) {
  3483. p->static_prio = NICE_TO_PRIO(nice);
  3484. goto out_unlock;
  3485. }
  3486. on_rq = p->on_rq;
  3487. if (on_rq)
  3488. dequeue_task(rq, p, 0);
  3489. p->static_prio = NICE_TO_PRIO(nice);
  3490. set_load_weight(p);
  3491. old_prio = p->prio;
  3492. p->prio = effective_prio(p);
  3493. delta = p->prio - old_prio;
  3494. if (on_rq) {
  3495. enqueue_task(rq, p, 0);
  3496. /*
  3497. * If the task increased its priority or is running and
  3498. * lowered its priority, then reschedule its CPU:
  3499. */
  3500. if (delta < 0 || (delta > 0 && task_running(rq, p)))
  3501. resched_task(rq->curr);
  3502. }
  3503. out_unlock:
  3504. task_rq_unlock(rq, p, &flags);
  3505. }
  3506. EXPORT_SYMBOL(set_user_nice);
  3507. /*
  3508. * can_nice - check if a task can reduce its nice value
  3509. * @p: task
  3510. * @nice: nice value
  3511. */
  3512. int can_nice(const struct task_struct *p, const int nice)
  3513. {
  3514. /* convert nice value [19,-20] to rlimit style value [1,40] */
  3515. int nice_rlim = 20 - nice;
  3516. return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
  3517. capable(CAP_SYS_NICE));
  3518. }
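/*
 * Illustrative sketch (not part of this file): the nice -> RLIMIT_NICE
 * mapping used by can_nice() above.  Nice -20 maps to rlimit value 40,
 * nice 0 to 20 and nice +19 to 1, so the check "20 - nice <= rlimit"
 * permits nice values down to 20 - rlimit.
 */
#include <stdio.h>

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> rlimit style %2d\n", nice, 20 - nice);
	return 0;
}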
  3519. #ifdef __ARCH_WANT_SYS_NICE
  3520. /*
  3521. * sys_nice - change the priority of the current process.
  3522. * @increment: priority increment
  3523. *
  3524. * sys_setpriority is a more generic, but much slower function that
  3525. * does similar things.
  3526. */
  3527. SYSCALL_DEFINE1(nice, int, increment)
  3528. {
  3529. long nice, retval;
  3530. /*
  3531. * Setpriority might change our priority at the same moment.
  3532. * We don't have to worry. Conceptually one call occurs first
  3533. * and we have a single winner.
  3534. */
  3535. if (increment < -40)
  3536. increment = -40;
  3537. if (increment > 40)
  3538. increment = 40;
  3539. nice = TASK_NICE(current) + increment;
  3540. if (nice < -20)
  3541. nice = -20;
  3542. if (nice > 19)
  3543. nice = 19;
  3544. if (increment < 0 && !can_nice(current, nice))
  3545. return -EPERM;
  3546. retval = security_task_setnice(current, nice);
  3547. if (retval)
  3548. return retval;
  3549. set_user_nice(current, nice);
  3550. return 0;
  3551. }
  3552. #endif
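/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * syscall above.  A process can always make itself nicer; going back the
 * other way needs CAP_SYS_NICE or enough RLIMIT_NICE, as enforced via
 * can_nice().
 */
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	errno = 0;
	int val = nice(5);	/* become 5 nicer */

	if (val == -1 && errno != 0)
		perror("nice");
	else
		printf("new nice value: %d\n", val);
	return 0;
}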
  3553. /**
  3554. * task_prio - return the priority value of a given task.
  3555. * @p: the task in question.
  3556. *
  3557. * This is the priority value as seen by users in /proc.
  3558. * RT tasks are offset by -200. Normal tasks are centered
  3559. * around 0, value goes from -16 to +15.
  3560. */
  3561. int task_prio(const struct task_struct *p)
  3562. {
  3563. return p->prio - MAX_RT_PRIO;
  3564. }
  3565. /**
  3566. * task_nice - return the nice value of a given task.
  3567. * @p: the task in question.
  3568. */
  3569. int task_nice(const struct task_struct *p)
  3570. {
  3571. return TASK_NICE(p);
  3572. }
  3573. EXPORT_SYMBOL(task_nice);
  3574. /**
  3575. * idle_cpu - is a given cpu idle currently?
  3576. * @cpu: the processor in question.
  3577. */
  3578. int idle_cpu(int cpu)
  3579. {
  3580. struct rq *rq = cpu_rq(cpu);
  3581. if (rq->curr != rq->idle)
  3582. return 0;
  3583. if (rq->nr_running)
  3584. return 0;
  3585. #ifdef CONFIG_SMP
  3586. if (!llist_empty(&rq->wake_list))
  3587. return 0;
  3588. #endif
  3589. return 1;
  3590. }
  3591. /**
  3592. * idle_task - return the idle task for a given cpu.
  3593. * @cpu: the processor in question.
  3594. */
  3595. struct task_struct *idle_task(int cpu)
  3596. {
  3597. return cpu_rq(cpu)->idle;
  3598. }
  3599. /**
  3600. * find_process_by_pid - find a process with a matching PID value.
  3601. * @pid: the pid in question.
  3602. */
  3603. static struct task_struct *find_process_by_pid(pid_t pid)
  3604. {
  3605. return pid ? find_task_by_vpid(pid) : current;
  3606. }
  3607. /* Actually do priority change: must hold rq lock. */
  3608. static void
  3609. __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
  3610. {
  3611. p->policy = policy;
  3612. p->rt_priority = prio;
  3613. p->normal_prio = normal_prio(p);
  3614. /* we are holding p->pi_lock already */
  3615. p->prio = rt_mutex_getprio(p);
  3616. if (rt_prio(p->prio))
  3617. p->sched_class = &rt_sched_class;
  3618. else
  3619. p->sched_class = &fair_sched_class;
  3620. set_load_weight(p);
  3621. }
  3622. /*
  3623. * check the target process has a UID that matches the current process's
  3624. */
  3625. static bool check_same_owner(struct task_struct *p)
  3626. {
  3627. const struct cred *cred = current_cred(), *pcred;
  3628. bool match;
  3629. rcu_read_lock();
  3630. pcred = __task_cred(p);
  3631. match = (uid_eq(cred->euid, pcred->euid) ||
  3632. uid_eq(cred->euid, pcred->uid));
  3633. rcu_read_unlock();
  3634. return match;
  3635. }
  3636. static int __sched_setscheduler(struct task_struct *p, int policy,
  3637. const struct sched_param *param, bool user)
  3638. {
  3639. int retval, oldprio, oldpolicy = -1, on_rq, running;
  3640. unsigned long flags;
  3641. const struct sched_class *prev_class;
  3642. struct rq *rq;
  3643. int reset_on_fork;
  3644. /* may grab non-irq protected spin_locks */
  3645. BUG_ON(in_interrupt());
  3646. recheck:
  3647. /* double check policy once rq lock held */
  3648. if (policy < 0) {
  3649. reset_on_fork = p->sched_reset_on_fork;
  3650. policy = oldpolicy = p->policy;
  3651. } else {
  3652. reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
  3653. policy &= ~SCHED_RESET_ON_FORK;
  3654. if (policy != SCHED_FIFO && policy != SCHED_RR &&
  3655. policy != SCHED_NORMAL && policy != SCHED_BATCH &&
  3656. policy != SCHED_IDLE)
  3657. return -EINVAL;
  3658. }
  3659. /*
  3660. * Valid priorities for SCHED_FIFO and SCHED_RR are
  3661. * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
  3662. * SCHED_BATCH and SCHED_IDLE is 0.
  3663. */
  3664. if (param->sched_priority < 0 ||
  3665. (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
  3666. (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
  3667. return -EINVAL;
  3668. if (rt_policy(policy) != (param->sched_priority != 0))
  3669. return -EINVAL;
  3670. /*
  3671. * Allow unprivileged RT tasks to decrease priority:
  3672. */
  3673. if (user && !capable(CAP_SYS_NICE)) {
  3674. if (rt_policy(policy)) {
  3675. unsigned long rlim_rtprio =
  3676. task_rlimit(p, RLIMIT_RTPRIO);
  3677. /* can't set/change the rt policy */
  3678. if (policy != p->policy && !rlim_rtprio)
  3679. return -EPERM;
  3680. /* can't increase priority */
  3681. if (param->sched_priority > p->rt_priority &&
  3682. param->sched_priority > rlim_rtprio)
  3683. return -EPERM;
  3684. }
  3685. /*
  3686. * Treat SCHED_IDLE as nice 20. Only allow a switch to
  3687. * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
  3688. */
  3689. if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
  3690. if (!can_nice(p, TASK_NICE(p)))
  3691. return -EPERM;
  3692. }
  3693. /* can't change other user's priorities */
  3694. if (!check_same_owner(p))
  3695. return -EPERM;
  3696. /* Normal users shall not reset the sched_reset_on_fork flag */
  3697. if (p->sched_reset_on_fork && !reset_on_fork)
  3698. return -EPERM;
  3699. }
  3700. if (user) {
  3701. retval = security_task_setscheduler(p);
  3702. if (retval)
  3703. return retval;
  3704. }
  3705. /*
  3706. * make sure no PI-waiters arrive (or leave) while we are
  3707. * changing the priority of the task:
  3708. *
  3709. * To be able to change p->policy safely, the appropriate
  3710. * runqueue lock must be held.
  3711. */
  3712. rq = task_rq_lock(p, &flags);
  3713. /*
3714. * Changing the policy of the stop threads is a very bad idea
  3715. */
  3716. if (p == rq->stop) {
  3717. task_rq_unlock(rq, p, &flags);
  3718. return -EINVAL;
  3719. }
  3720. /*
  3721. * If not changing anything there's no need to proceed further:
  3722. */
  3723. if (unlikely(policy == p->policy && (!rt_policy(policy) ||
  3724. param->sched_priority == p->rt_priority))) {
  3725. task_rq_unlock(rq, p, &flags);
  3726. return 0;
  3727. }
  3728. #ifdef CONFIG_RT_GROUP_SCHED
  3729. if (user) {
  3730. /*
  3731. * Do not allow realtime tasks into groups that have no runtime
  3732. * assigned.
  3733. */
  3734. if (rt_bandwidth_enabled() && rt_policy(policy) &&
  3735. task_group(p)->rt_bandwidth.rt_runtime == 0 &&
  3736. !task_group_is_autogroup(task_group(p))) {
  3737. task_rq_unlock(rq, p, &flags);
  3738. return -EPERM;
  3739. }
  3740. }
  3741. #endif
  3742. /* recheck policy now with rq lock held */
  3743. if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
  3744. policy = oldpolicy = -1;
  3745. task_rq_unlock(rq, p, &flags);
  3746. goto recheck;
  3747. }
  3748. on_rq = p->on_rq;
  3749. running = task_current(rq, p);
  3750. if (on_rq)
  3751. dequeue_task(rq, p, 0);
  3752. if (running)
  3753. p->sched_class->put_prev_task(rq, p);
  3754. p->sched_reset_on_fork = reset_on_fork;
  3755. oldprio = p->prio;
  3756. prev_class = p->sched_class;
  3757. __setscheduler(rq, p, policy, param->sched_priority);
  3758. if (running)
  3759. p->sched_class->set_curr_task(rq);
  3760. if (on_rq)
  3761. enqueue_task(rq, p, 0);
  3762. check_class_changed(rq, p, prev_class, oldprio);
  3763. task_rq_unlock(rq, p, &flags);
  3764. rt_mutex_adjust_pi(p);
  3765. return 0;
  3766. }
  3767. /**
  3768. * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  3769. * @p: the task in question.
  3770. * @policy: new policy.
  3771. * @param: structure containing the new RT priority.
  3772. *
  3773. * NOTE that the task may be already dead.
  3774. */
  3775. int sched_setscheduler(struct task_struct *p, int policy,
  3776. const struct sched_param *param)
  3777. {
  3778. return __sched_setscheduler(p, policy, param, true);
  3779. }
  3780. EXPORT_SYMBOL_GPL(sched_setscheduler);
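/*
 * Illustrative sketch (not part of this file): the userspace counterpart
 * of the entry points above, switching the calling process to SCHED_FIFO.
 * Without CAP_SYS_NICE this is subject to the RLIMIT_RTPRIO checks in
 * __sched_setscheduler().
 */
#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}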
  3781. /**
  3782. * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
  3783. * @p: the task in question.
  3784. * @policy: new policy.
  3785. * @param: structure containing the new RT priority.
  3786. *
  3787. * Just like sched_setscheduler, only don't bother checking if the
  3788. * current context has permission. For example, this is needed in
  3789. * stop_machine(): we create temporary high priority worker threads,
  3790. * but our caller might not have that capability.
  3791. */
  3792. int sched_setscheduler_nocheck(struct task_struct *p, int policy,
  3793. const struct sched_param *param)
  3794. {
  3795. return __sched_setscheduler(p, policy, param, false);
  3796. }
  3797. static int
  3798. do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  3799. {
  3800. struct sched_param lparam;
  3801. struct task_struct *p;
  3802. int retval;
  3803. if (!param || pid < 0)
  3804. return -EINVAL;
  3805. if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
  3806. return -EFAULT;
  3807. rcu_read_lock();
  3808. retval = -ESRCH;
  3809. p = find_process_by_pid(pid);
  3810. if (p != NULL)
  3811. retval = sched_setscheduler(p, policy, &lparam);
  3812. rcu_read_unlock();
  3813. return retval;
  3814. }
  3815. /**
  3816. * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  3817. * @pid: the pid in question.
  3818. * @policy: new policy.
  3819. * @param: structure containing the new RT priority.
  3820. */
  3821. SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  3822. struct sched_param __user *, param)
  3823. {
  3824. /* negative values for policy are not valid */
  3825. if (policy < 0)
  3826. return -EINVAL;
  3827. return do_sched_setscheduler(pid, policy, param);
  3828. }
  3829. /**
  3830. * sys_sched_setparam - set/change the RT priority of a thread
  3831. * @pid: the pid in question.
  3832. * @param: structure containing the new RT priority.
  3833. */
  3834. SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  3835. {
  3836. return do_sched_setscheduler(pid, -1, param);
  3837. }
  3838. /**
  3839. * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  3840. * @pid: the pid in question.
  3841. */
  3842. SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  3843. {
  3844. struct task_struct *p;
  3845. int retval;
  3846. if (pid < 0)
  3847. return -EINVAL;
  3848. retval = -ESRCH;
  3849. rcu_read_lock();
  3850. p = find_process_by_pid(pid);
  3851. if (p) {
  3852. retval = security_task_getscheduler(p);
  3853. if (!retval)
  3854. retval = p->policy
  3855. | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
  3856. }
  3857. rcu_read_unlock();
  3858. return retval;
  3859. }
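/*
 * Illustrative userspace sketch (not part of this file): querying a thread's
 * policy through the glibc wrapper. As implemented above, the syscall ORs
 * SCHED_RESET_ON_FORK into the returned policy when that flag is set, so a
 * caller interested in the bare policy should mask the flag off (0x40000000
 * mirrors the kernel's value and is defined here only for illustration).
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *
 *	#ifndef SCHED_RESET_ON_FORK
 *	#define SCHED_RESET_ON_FORK	0x40000000
 *	#endif
 *
 *	void show_policy(pid_t pid)
 *	{
 *		int ret = sched_getscheduler(pid);
 *
 *		if (ret == -1) {
 *			perror("sched_getscheduler");
 *			return;
 *		}
 *		printf("policy=%d reset_on_fork=%d\n",
 *		       ret & ~SCHED_RESET_ON_FORK,
 *		       !!(ret & SCHED_RESET_ON_FORK));
 *	}
 */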
  3860. /**
  3861. * sys_sched_getparam - get the RT priority of a thread
  3862. * @pid: the pid in question.
  3863. * @param: structure containing the RT priority.
  3864. */
  3865. SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
  3866. {
  3867. struct sched_param lp;
  3868. struct task_struct *p;
  3869. int retval;
  3870. if (!param || pid < 0)
  3871. return -EINVAL;
  3872. rcu_read_lock();
  3873. p = find_process_by_pid(pid);
  3874. retval = -ESRCH;
  3875. if (!p)
  3876. goto out_unlock;
  3877. retval = security_task_getscheduler(p);
  3878. if (retval)
  3879. goto out_unlock;
  3880. lp.sched_priority = p->rt_priority;
  3881. rcu_read_unlock();
  3882. /*
3883. * This one might sleep; we cannot do it with a spinlock held ...
  3884. */
  3885. retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
  3886. return retval;
  3887. out_unlock:
  3888. rcu_read_unlock();
  3889. return retval;
  3890. }
  3891. long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
  3892. {
  3893. cpumask_var_t cpus_allowed, new_mask;
  3894. struct task_struct *p;
  3895. int retval;
  3896. get_online_cpus();
  3897. rcu_read_lock();
  3898. p = find_process_by_pid(pid);
  3899. if (!p) {
  3900. rcu_read_unlock();
  3901. put_online_cpus();
  3902. return -ESRCH;
  3903. }
  3904. /* Prevent p going away */
  3905. get_task_struct(p);
  3906. rcu_read_unlock();
  3907. if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
  3908. retval = -ENOMEM;
  3909. goto out_put_task;
  3910. }
  3911. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
  3912. retval = -ENOMEM;
  3913. goto out_free_cpus_allowed;
  3914. }
  3915. retval = -EPERM;
  3916. if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
  3917. goto out_unlock;
  3918. retval = security_task_setscheduler(p);
  3919. if (retval)
  3920. goto out_unlock;
  3921. cpuset_cpus_allowed(p, cpus_allowed);
  3922. cpumask_and(new_mask, in_mask, cpus_allowed);
  3923. again:
  3924. retval = set_cpus_allowed_ptr(p, new_mask);
  3925. if (!retval) {
  3926. cpuset_cpus_allowed(p, cpus_allowed);
  3927. if (!cpumask_subset(new_mask, cpus_allowed)) {
  3928. /*
  3929. * We must have raced with a concurrent cpuset
  3930. * update. Just reset the cpus_allowed to the
  3931. * cpuset's cpus_allowed
  3932. */
  3933. cpumask_copy(new_mask, cpus_allowed);
  3934. goto again;
  3935. }
  3936. }
  3937. out_unlock:
  3938. free_cpumask_var(new_mask);
  3939. out_free_cpus_allowed:
  3940. free_cpumask_var(cpus_allowed);
  3941. out_put_task:
  3942. put_task_struct(p);
  3943. put_online_cpus();
  3944. return retval;
  3945. }
  3946. static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  3947. struct cpumask *new_mask)
  3948. {
  3949. if (len < cpumask_size())
  3950. cpumask_clear(new_mask);
  3951. else if (len > cpumask_size())
  3952. len = cpumask_size();
  3953. return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
  3954. }
  3955. /**
  3956. * sys_sched_setaffinity - set the cpu affinity of a process
  3957. * @pid: pid of the process
  3958. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  3959. * @user_mask_ptr: user-space pointer to the new cpu mask
  3960. */
  3961. SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
  3962. unsigned long __user *, user_mask_ptr)
  3963. {
  3964. cpumask_var_t new_mask;
  3965. int retval;
  3966. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
  3967. return -ENOMEM;
  3968. retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
  3969. if (retval == 0)
  3970. retval = sched_setaffinity(pid, new_mask);
  3971. free_cpumask_var(new_mask);
  3972. return retval;
  3973. }
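/*
 * Illustrative userspace sketch (not part of this file): pinning the calling
 * thread to CPU 0 through the glibc sched_setaffinity() wrapper, which
 * marshals a cpu_set_t into the bitmask consumed by this syscall.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int pin_to_cpu0(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1) {
 *			perror("sched_setaffinity");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */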
  3974. long sched_getaffinity(pid_t pid, struct cpumask *mask)
  3975. {
  3976. struct task_struct *p;
  3977. unsigned long flags;
  3978. int retval;
  3979. get_online_cpus();
  3980. rcu_read_lock();
  3981. retval = -ESRCH;
  3982. p = find_process_by_pid(pid);
  3983. if (!p)
  3984. goto out_unlock;
  3985. retval = security_task_getscheduler(p);
  3986. if (retval)
  3987. goto out_unlock;
  3988. raw_spin_lock_irqsave(&p->pi_lock, flags);
  3989. cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
  3990. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3991. out_unlock:
  3992. rcu_read_unlock();
  3993. put_online_cpus();
  3994. return retval;
  3995. }
  3996. /**
  3997. * sys_sched_getaffinity - get the cpu affinity of a process
  3998. * @pid: pid of the process
  3999. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  4000. * @user_mask_ptr: user-space pointer to hold the current cpu mask
  4001. */
  4002. SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  4003. unsigned long __user *, user_mask_ptr)
  4004. {
  4005. int ret;
  4006. cpumask_var_t mask;
  4007. if ((len * BITS_PER_BYTE) < nr_cpu_ids)
  4008. return -EINVAL;
  4009. if (len & (sizeof(unsigned long)-1))
  4010. return -EINVAL;
  4011. if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  4012. return -ENOMEM;
  4013. ret = sched_getaffinity(pid, mask);
  4014. if (ret == 0) {
  4015. size_t retlen = min_t(size_t, len, cpumask_size());
  4016. if (copy_to_user(user_mask_ptr, mask, retlen))
  4017. ret = -EFAULT;
  4018. else
  4019. ret = retlen;
  4020. }
  4021. free_cpumask_var(mask);
  4022. return ret;
  4023. }
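/*
 * Illustrative userspace sketch (not part of this file): unlike the glibc
 * wrapper, which returns 0 on success, the raw syscall above returns the
 * number of bytes it copied, i.e. the kernel's cpumask size. A minimal probe
 * using syscall(2):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Returns the kernel cpumask size in bytes, or -1 on error.
 *	long kernel_cpumask_bytes(void)
 *	{
 *		unsigned long buf[128];		// comfortably larger than most cpumasks
 *
 *		return syscall(SYS_sched_getaffinity, 0, sizeof(buf), buf);
 *	}
 */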
  4024. /**
  4025. * sys_sched_yield - yield the current processor to other threads.
  4026. *
  4027. * This function yields the current CPU to other tasks. If there are no
  4028. * other threads running on this CPU then this function will return.
  4029. */
  4030. SYSCALL_DEFINE0(sched_yield)
  4031. {
  4032. struct rq *rq = this_rq_lock();
  4033. schedstat_inc(rq, yld_count);
  4034. current->sched_class->yield_task(rq);
  4035. /*
  4036. * Since we are going to call schedule() anyway, there's
  4037. * no need to preempt or enable interrupts:
  4038. */
  4039. __release(rq->lock);
  4040. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  4041. do_raw_spin_unlock(&rq->lock);
  4042. sched_preempt_enable_no_resched();
  4043. schedule();
  4044. return 0;
  4045. }
  4046. static inline int should_resched(void)
  4047. {
  4048. return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
  4049. }
  4050. static void __cond_resched(void)
  4051. {
  4052. add_preempt_count(PREEMPT_ACTIVE);
  4053. __schedule();
  4054. sub_preempt_count(PREEMPT_ACTIVE);
  4055. }
  4056. int __sched _cond_resched(void)
  4057. {
  4058. if (should_resched()) {
  4059. __cond_resched();
  4060. return 1;
  4061. }
  4062. return 0;
  4063. }
  4064. EXPORT_SYMBOL(_cond_resched);
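/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this file):
 * cond_resched() is the usual way for a long, non-atomic loop to offer the
 * scheduler a chance to run something more important without unconditionally
 * calling schedule() on every iteration.
 *
 *	static void frob_many_items(struct list_head *head)
 *	{
 *		struct item *it;		// hypothetical type
 *
 *		list_for_each_entry(it, head, node) {
 *			frob_one(it);		// hypothetical helper
 *			cond_resched();		// reschedules only when needed
 *		}
 *	}
 */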
  4065. /*
  4066. * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  4067. * call schedule, and on return reacquire the lock.
  4068. *
  4069. * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  4070. * operations here to prevent schedule() from being called twice (once via
  4071. * spin_unlock(), once by hand).
  4072. */
  4073. int __cond_resched_lock(spinlock_t *lock)
  4074. {
  4075. int resched = should_resched();
  4076. int ret = 0;
  4077. lockdep_assert_held(lock);
  4078. if (spin_needbreak(lock) || resched) {
  4079. spin_unlock(lock);
  4080. if (resched)
  4081. __cond_resched();
  4082. else
  4083. cpu_relax();
  4084. ret = 1;
  4085. spin_lock(lock);
  4086. }
  4087. return ret;
  4088. }
  4089. EXPORT_SYMBOL(__cond_resched_lock);
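/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this file):
 * cond_resched_lock() lets a loop that must hold a spinlock periodically drop
 * it, reschedule (or just let a contending CPU grab the lock), and reacquire
 * it before continuing.
 *
 *	static void scan_table(struct my_table *t)	// hypothetical type
 *	{
 *		int i;
 *
 *		spin_lock(&t->lock);
 *		for (i = 0; i < t->nr_entries; i++) {
 *			scan_entry(&t->entries[i]);	// hypothetical helper
 *			cond_resched_lock(&t->lock);	// may drop and retake t->lock
 *		}
 *		spin_unlock(&t->lock);
 *	}
 */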
  4090. int __sched __cond_resched_softirq(void)
  4091. {
  4092. BUG_ON(!in_softirq());
  4093. if (should_resched()) {
  4094. local_bh_enable();
  4095. __cond_resched();
  4096. local_bh_disable();
  4097. return 1;
  4098. }
  4099. return 0;
  4100. }
  4101. EXPORT_SYMBOL(__cond_resched_softirq);
  4102. /**
  4103. * yield - yield the current processor to other threads.
  4104. *
4105. * Do not ever use this function; there's a 99% chance you're doing it wrong.
  4106. *
  4107. * The scheduler is at all times free to pick the calling task as the most
4108. * eligible task to run; if removing the yield() call from your code breaks
4109. * it, it's already broken.
  4110. *
  4111. * Typical broken usage is:
  4112. *
  4113. * while (!event)
  4114. * yield();
  4115. *
  4116. * where one assumes that yield() will let 'the other' process run that will
  4117. * make event true. If the current task is a SCHED_FIFO task that will never
  4118. * happen. Never use yield() as a progress guarantee!!
  4119. *
  4120. * If you want to use yield() to wait for something, use wait_event().
  4121. * If you want to use yield() to be 'nice' for others, use cond_resched().
  4122. * If you still want to use yield(), do not!
  4123. */
  4124. void __sched yield(void)
  4125. {
  4126. set_current_state(TASK_RUNNING);
  4127. sys_sched_yield();
  4128. }
  4129. EXPORT_SYMBOL(yield);
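/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this file):
 * the broken "while (!event) yield();" pattern warned about above is normally
 * replaced with a waitqueue, so the waiter sleeps until the other side makes
 * the condition true and wakes it.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);		// hypothetical queue
 *	static int event;
 *
 *	static void consumer(void)
 *	{
 *		wait_event(my_wq, event);		// sleeps until event != 0
 *	}
 *
 *	static void producer(void)
 *	{
 *		event = 1;
 *		wake_up(&my_wq);
 *	}
 */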
  4130. /**
  4131. * yield_to - yield the current processor to another thread in
  4132. * your thread group, or accelerate that thread toward the
  4133. * processor it's on.
  4134. * @p: target task
  4135. * @preempt: whether task preemption is allowed or not
  4136. *
  4137. * It's the caller's job to ensure that the target task struct
  4138. * can't go away on us before we can do any checks.
  4139. *
  4140. * Returns true if we indeed boosted the target task.
  4141. */
  4142. bool __sched yield_to(struct task_struct *p, bool preempt)
  4143. {
  4144. struct task_struct *curr = current;
  4145. struct rq *rq, *p_rq;
  4146. unsigned long flags;
  4147. bool yielded = 0;
  4148. local_irq_save(flags);
  4149. rq = this_rq();
  4150. again:
  4151. p_rq = task_rq(p);
  4152. double_rq_lock(rq, p_rq);
  4153. while (task_rq(p) != p_rq) {
  4154. double_rq_unlock(rq, p_rq);
  4155. goto again;
  4156. }
  4157. if (!curr->sched_class->yield_to_task)
  4158. goto out;
  4159. if (curr->sched_class != p->sched_class)
  4160. goto out;
  4161. if (task_running(p_rq, p) || p->state)
  4162. goto out;
  4163. yielded = curr->sched_class->yield_to_task(rq, p, preempt);
  4164. if (yielded) {
  4165. schedstat_inc(rq, yld_count);
  4166. /*
  4167. * Make p's CPU reschedule; pick_next_entity takes care of
  4168. * fairness.
  4169. */
  4170. if (preempt && rq != p_rq)
  4171. resched_task(p_rq->curr);
  4172. } else {
  4173. /*
  4174. * We might have set it in task_yield_fair(), but are
  4175. * not going to schedule(), so don't want to skip
  4176. * the next update.
  4177. */
  4178. rq->skip_clock_update = 0;
  4179. }
  4180. out:
  4181. double_rq_unlock(rq, p_rq);
  4182. local_irq_restore(flags);
  4183. if (yielded)
  4184. schedule();
  4185. return yielded;
  4186. }
  4187. EXPORT_SYMBOL_GPL(yield_to);
  4188. /*
  4189. * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  4190. * that process accounting knows that this is a task in IO wait state.
  4191. */
  4192. void __sched io_schedule(void)
  4193. {
  4194. struct rq *rq = raw_rq();
  4195. delayacct_blkio_start();
  4196. atomic_inc(&rq->nr_iowait);
  4197. blk_flush_plug(current);
  4198. current->in_iowait = 1;
  4199. schedule();
  4200. current->in_iowait = 0;
  4201. atomic_dec(&rq->nr_iowait);
  4202. delayacct_blkio_end();
  4203. }
  4204. EXPORT_SYMBOL(io_schedule);
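/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this file):
 * io_schedule() is typically called from a wait loop for I/O completion, so
 * the sleep is accounted as iowait rather than as an ordinary sleep.
 *
 *	static void wait_for_io_done(struct my_request *req)	// hypothetical type
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&req->waitq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (req->done)
 *				break;
 *			io_schedule();
 *		}
 *		finish_wait(&req->waitq, &wait);
 *	}
 */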
  4205. long __sched io_schedule_timeout(long timeout)
  4206. {
  4207. struct rq *rq = raw_rq();
  4208. long ret;
  4209. delayacct_blkio_start();
  4210. atomic_inc(&rq->nr_iowait);
  4211. blk_flush_plug(current);
  4212. current->in_iowait = 1;
  4213. ret = schedule_timeout(timeout);
  4214. current->in_iowait = 0;
  4215. atomic_dec(&rq->nr_iowait);
  4216. delayacct_blkio_end();
  4217. return ret;
  4218. }
  4219. /**
  4220. * sys_sched_get_priority_max - return maximum RT priority.
  4221. * @policy: scheduling class.
  4222. *
  4223. * this syscall returns the maximum rt_priority that can be used
  4224. * by a given scheduling class.
  4225. */
  4226. SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  4227. {
  4228. int ret = -EINVAL;
  4229. switch (policy) {
  4230. case SCHED_FIFO:
  4231. case SCHED_RR:
  4232. ret = MAX_USER_RT_PRIO-1;
  4233. break;
  4234. case SCHED_NORMAL:
  4235. case SCHED_BATCH:
  4236. case SCHED_IDLE:
  4237. ret = 0;
  4238. break;
  4239. }
  4240. return ret;
  4241. }
  4242. /**
  4243. * sys_sched_get_priority_min - return minimum RT priority.
  4244. * @policy: scheduling class.
  4245. *
  4246. * this syscall returns the minimum rt_priority that can be used
  4247. * by a given scheduling class.
  4248. */
  4249. SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  4250. {
  4251. int ret = -EINVAL;
  4252. switch (policy) {
  4253. case SCHED_FIFO:
  4254. case SCHED_RR:
  4255. ret = 1;
  4256. break;
  4257. case SCHED_NORMAL:
  4258. case SCHED_BATCH:
  4259. case SCHED_IDLE:
  4260. ret = 0;
  4261. }
  4262. return ret;
  4263. }
  4264. /**
  4265. * sys_sched_rr_get_interval - return the default timeslice of a process.
  4266. * @pid: pid of the process.
  4267. * @interval: userspace pointer to the timeslice value.
  4268. *
  4269. * this syscall writes the default timeslice value of a given process
  4270. * into the user-space timespec buffer. A value of '0' means infinity.
  4271. */
  4272. SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
  4273. struct timespec __user *, interval)
  4274. {
  4275. struct task_struct *p;
  4276. unsigned int time_slice;
  4277. unsigned long flags;
  4278. struct rq *rq;
  4279. int retval;
  4280. struct timespec t;
  4281. if (pid < 0)
  4282. return -EINVAL;
  4283. retval = -ESRCH;
  4284. rcu_read_lock();
  4285. p = find_process_by_pid(pid);
  4286. if (!p)
  4287. goto out_unlock;
  4288. retval = security_task_getscheduler(p);
  4289. if (retval)
  4290. goto out_unlock;
  4291. rq = task_rq_lock(p, &flags);
  4292. time_slice = p->sched_class->get_rr_interval(rq, p);
  4293. task_rq_unlock(rq, p, &flags);
  4294. rcu_read_unlock();
  4295. jiffies_to_timespec(time_slice, &t);
  4296. retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  4297. return retval;
  4298. out_unlock:
  4299. rcu_read_unlock();
  4300. return retval;
  4301. }
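/*
 * Illustrative userspace sketch (not part of this file): reading the calling
 * thread's round-robin timeslice through the glibc wrapper for this syscall.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	void show_timeslice(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts) == -1) {
 *			perror("sched_rr_get_interval");
 *			return;
 *		}
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *	}
 */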
  4302. static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
  4303. void sched_show_task(struct task_struct *p)
  4304. {
  4305. unsigned long free = 0;
  4306. unsigned state;
  4307. state = p->state ? __ffs(p->state) + 1 : 0;
  4308. printk(KERN_INFO "%-15.15s %c", p->comm,
  4309. state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  4310. #if BITS_PER_LONG == 32
  4311. if (state == TASK_RUNNING)
  4312. printk(KERN_CONT " running ");
  4313. else
  4314. printk(KERN_CONT " %08lx ", thread_saved_pc(p));
  4315. #else
  4316. if (state == TASK_RUNNING)
  4317. printk(KERN_CONT " running task ");
  4318. else
  4319. printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  4320. #endif
  4321. #ifdef CONFIG_DEBUG_STACK_USAGE
  4322. free = stack_not_used(p);
  4323. #endif
  4324. printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
  4325. task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
  4326. (unsigned long)task_thread_info(p)->flags);
  4327. show_stack(p, NULL);
  4328. }
  4329. void show_state_filter(unsigned long state_filter)
  4330. {
  4331. struct task_struct *g, *p;
  4332. #if BITS_PER_LONG == 32
  4333. printk(KERN_INFO
4334. "  task                PC stack   pid father\n");
  4335. #else
  4336. printk(KERN_INFO
4337. "  task                        PC stack   pid father\n");
  4338. #endif
  4339. rcu_read_lock();
  4340. do_each_thread(g, p) {
  4341. /*
  4342. * reset the NMI-timeout, listing all files on a slow
  4343. * console might take a lot of time:
  4344. */
  4345. touch_nmi_watchdog();
  4346. if (!state_filter || (p->state & state_filter))
  4347. sched_show_task(p);
  4348. } while_each_thread(g, p);
  4349. touch_all_softlockup_watchdogs();
  4350. #ifdef CONFIG_SCHED_DEBUG
  4351. sysrq_sched_debug_show();
  4352. #endif
  4353. rcu_read_unlock();
  4354. /*
  4355. * Only show locks if all tasks are dumped:
  4356. */
  4357. if (!state_filter)
  4358. debug_show_all_locks();
  4359. }
  4360. void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  4361. {
  4362. idle->sched_class = &idle_sched_class;
  4363. }
  4364. /**
  4365. * init_idle - set up an idle thread for a given CPU
  4366. * @idle: task in question
  4367. * @cpu: cpu the idle task belongs to
  4368. *
  4369. * NOTE: this function does not set the idle thread's NEED_RESCHED
  4370. * flag, to make booting more robust.
  4371. */
  4372. void __cpuinit init_idle(struct task_struct *idle, int cpu)
  4373. {
  4374. struct rq *rq = cpu_rq(cpu);
  4375. unsigned long flags;
  4376. raw_spin_lock_irqsave(&rq->lock, flags);
  4377. __sched_fork(idle);
  4378. idle->state = TASK_RUNNING;
  4379. idle->se.exec_start = sched_clock();
  4380. do_set_cpus_allowed(idle, cpumask_of(cpu));
  4381. /*
4382. * We're having a chicken-and-egg problem: even though we are
  4383. * holding rq->lock, the cpu isn't yet set to this cpu so the
  4384. * lockdep check in task_group() will fail.
  4385. *
  4386. * Similar case to sched_fork(). / Alternatively we could
  4387. * use task_rq_lock() here and obtain the other rq->lock.
  4388. *
  4389. * Silence PROVE_RCU
  4390. */
  4391. rcu_read_lock();
  4392. __set_task_cpu(idle, cpu);
  4393. rcu_read_unlock();
  4394. rq->curr = rq->idle = idle;
  4395. #if defined(CONFIG_SMP)
  4396. idle->on_cpu = 1;
  4397. #endif
  4398. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4399. /* Set the preempt count _outside_ the spinlocks! */
  4400. task_thread_info(idle)->preempt_count = 0;
  4401. /*
  4402. * The idle tasks have their own, simple scheduling class:
  4403. */
  4404. idle->sched_class = &idle_sched_class;
  4405. ftrace_graph_init_idle_task(idle, cpu);
  4406. #if defined(CONFIG_SMP)
  4407. sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
  4408. #endif
  4409. }
  4410. #ifdef CONFIG_SMP
  4411. void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  4412. {
  4413. if (p->sched_class && p->sched_class->set_cpus_allowed)
  4414. p->sched_class->set_cpus_allowed(p, new_mask);
  4415. cpumask_copy(&p->cpus_allowed, new_mask);
  4416. p->nr_cpus_allowed = cpumask_weight(new_mask);
  4417. }
  4418. /*
  4419. * This is how migration works:
  4420. *
  4421. * 1) we invoke migration_cpu_stop() on the target CPU using
  4422. * stop_one_cpu().
  4423. * 2) stopper starts to run (implicitly forcing the migrated thread
  4424. * off the CPU)
  4425. * 3) it checks whether the migrated task is still in the wrong runqueue.
  4426. * 4) if it's in the wrong runqueue then the migration thread removes
  4427. * it and puts it into the right queue.
  4428. * 5) stopper completes and stop_one_cpu() returns and the migration
  4429. * is done.
  4430. */
  4431. /*
  4432. * Change a given task's CPU affinity. Migrate the thread to a
  4433. * proper CPU and schedule it away if the CPU it's executing on
  4434. * is removed from the allowed bitmask.
  4435. *
  4436. * NOTE: the caller must have a valid reference to the task, the
  4437. * task must not exit() & deallocate itself prematurely. The
  4438. * call is not atomic; no spinlocks may be held.
  4439. */
  4440. int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
  4441. {
  4442. unsigned long flags;
  4443. struct rq *rq;
  4444. unsigned int dest_cpu;
  4445. int ret = 0;
  4446. rq = task_rq_lock(p, &flags);
  4447. if (cpumask_equal(&p->cpus_allowed, new_mask))
  4448. goto out;
  4449. if (!cpumask_intersects(new_mask, cpu_active_mask)) {
  4450. ret = -EINVAL;
  4451. goto out;
  4452. }
  4453. if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
  4454. ret = -EINVAL;
  4455. goto out;
  4456. }
  4457. do_set_cpus_allowed(p, new_mask);
  4458. /* Can the task run on the task's current CPU? If so, we're done */
  4459. if (cpumask_test_cpu(task_cpu(p), new_mask))
  4460. goto out;
  4461. dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
  4462. if (p->on_rq) {
  4463. struct migration_arg arg = { p, dest_cpu };
  4464. /* Need help from migration thread: drop lock and wait. */
  4465. task_rq_unlock(rq, p, &flags);
  4466. stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
  4467. tlb_migrate_finish(p->mm);
  4468. return 0;
  4469. }
  4470. out:
  4471. task_rq_unlock(rq, p, &flags);
  4472. return ret;
  4473. }
  4474. EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
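/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this file):
 * restricting an already-running kernel thread to a single CPU. As the
 * comment above requires, the caller must hold a reference to the task.
 *
 *	static int move_worker_to_cpu(struct task_struct *worker, int cpu)
 *	{
 *		if (!cpu_online(cpu))
 *			return -EINVAL;
 *		return set_cpus_allowed_ptr(worker, cpumask_of(cpu));
 *	}
 */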
  4475. /*
  4476. * Move (not current) task off this cpu, onto dest cpu. We're doing
  4477. * this because either it can't run here any more (set_cpus_allowed()
  4478. * away from this CPU, or CPU going down), or because we're
  4479. * attempting to rebalance this task on exec (sched_exec).
  4480. *
  4481. * So we race with normal scheduler movements, but that's OK, as long
  4482. * as the task is no longer on this CPU.
  4483. *
  4484. * Returns non-zero if task was successfully migrated.
  4485. */
  4486. static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  4487. {
  4488. struct rq *rq_dest, *rq_src;
  4489. int ret = 0;
  4490. if (unlikely(!cpu_active(dest_cpu)))
  4491. return ret;
  4492. rq_src = cpu_rq(src_cpu);
  4493. rq_dest = cpu_rq(dest_cpu);
  4494. raw_spin_lock(&p->pi_lock);
  4495. double_rq_lock(rq_src, rq_dest);
  4496. /* Already moved. */
  4497. if (task_cpu(p) != src_cpu)
  4498. goto done;
  4499. /* Affinity changed (again). */
  4500. if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
  4501. goto fail;
  4502. /*
  4503. * If we're not on a rq, the next wake-up will ensure we're
  4504. * placed properly.
  4505. */
  4506. if (p->on_rq) {
  4507. dequeue_task(rq_src, p, 0);
  4508. set_task_cpu(p, dest_cpu);
  4509. enqueue_task(rq_dest, p, 0);
  4510. check_preempt_curr(rq_dest, p, 0);
  4511. }
  4512. done:
  4513. ret = 1;
  4514. fail:
  4515. double_rq_unlock(rq_src, rq_dest);
  4516. raw_spin_unlock(&p->pi_lock);
  4517. return ret;
  4518. }
  4519. /*
  4520. * migration_cpu_stop - this will be executed by a highprio stopper thread
  4521. * and performs thread migration by bumping thread off CPU then
  4522. * 'pushing' onto another runqueue.
  4523. */
  4524. static int migration_cpu_stop(void *data)
  4525. {
  4526. struct migration_arg *arg = data;
  4527. /*
  4528. * The original target cpu might have gone down and we might
  4529. * be on another cpu but it doesn't matter.
  4530. */
  4531. local_irq_disable();
  4532. __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
  4533. local_irq_enable();
  4534. return 0;
  4535. }
  4536. #ifdef CONFIG_HOTPLUG_CPU
  4537. /*
  4538. * Ensures that the idle task is using init_mm right before its cpu goes
  4539. * offline.
  4540. */
  4541. void idle_task_exit(void)
  4542. {
  4543. struct mm_struct *mm = current->active_mm;
  4544. BUG_ON(cpu_online(smp_processor_id()));
  4545. if (mm != &init_mm)
  4546. switch_mm(mm, &init_mm, current);
  4547. mmdrop(mm);
  4548. }
  4549. /*
  4550. * Since this CPU is going 'away' for a while, fold any nr_active delta
  4551. * we might have. Assumes we're called after migrate_tasks() so that the
  4552. * nr_active count is stable.
  4553. *
  4554. * Also see the comment "Global load-average calculations".
  4555. */
  4556. static void calc_load_migrate(struct rq *rq)
  4557. {
  4558. long delta = calc_load_fold_active(rq);
  4559. if (delta)
  4560. atomic_long_add(delta, &calc_load_tasks);
  4561. }
  4562. /*
4563. * Migrate all tasks from the rq; sleeping tasks will be migrated by
  4564. * try_to_wake_up()->select_task_rq().
  4565. *
4566. * Called with rq->lock held even though we're in stop_machine() and
4567. * there's no concurrency possible; we hold the required locks anyway
  4568. * because of lock validation efforts.
  4569. */
  4570. static void migrate_tasks(unsigned int dead_cpu)
  4571. {
  4572. struct rq *rq = cpu_rq(dead_cpu);
  4573. struct task_struct *next, *stop = rq->stop;
  4574. int dest_cpu;
  4575. /*
  4576. * Fudge the rq selection such that the below task selection loop
  4577. * doesn't get stuck on the currently eligible stop task.
  4578. *
  4579. * We're currently inside stop_machine() and the rq is either stuck
  4580. * in the stop_machine_cpu_stop() loop, or we're executing this code,
  4581. * either way we should never end up calling schedule() until we're
  4582. * done here.
  4583. */
  4584. rq->stop = NULL;
  4585. for ( ; ; ) {
  4586. /*
  4587. * There's this thread running, bail when that's the only
  4588. * remaining thread.
  4589. */
  4590. if (rq->nr_running == 1)
  4591. break;
  4592. next = pick_next_task(rq);
  4593. BUG_ON(!next);
  4594. next->sched_class->put_prev_task(rq, next);
  4595. /* Find suitable destination for @next, with force if needed. */
  4596. dest_cpu = select_fallback_rq(dead_cpu, next);
  4597. raw_spin_unlock(&rq->lock);
  4598. __migrate_task(next, dead_cpu, dest_cpu);
  4599. raw_spin_lock(&rq->lock);
  4600. }
  4601. rq->stop = stop;
  4602. }
  4603. #endif /* CONFIG_HOTPLUG_CPU */
  4604. #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
  4605. static struct ctl_table sd_ctl_dir[] = {
  4606. {
  4607. .procname = "sched_domain",
  4608. .mode = 0555,
  4609. },
  4610. {}
  4611. };
  4612. static struct ctl_table sd_ctl_root[] = {
  4613. {
  4614. .procname = "kernel",
  4615. .mode = 0555,
  4616. .child = sd_ctl_dir,
  4617. },
  4618. {}
  4619. };
  4620. static struct ctl_table *sd_alloc_ctl_entry(int n)
  4621. {
  4622. struct ctl_table *entry =
  4623. kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
  4624. return entry;
  4625. }
  4626. static void sd_free_ctl_entry(struct ctl_table **tablep)
  4627. {
  4628. struct ctl_table *entry;
  4629. /*
  4630. * In the intermediate directories, both the child directory and
  4631. * procname are dynamically allocated and could fail but the mode
  4632. * will always be set. In the lowest directory the names are
  4633. * static strings and all have proc handlers.
  4634. */
  4635. for (entry = *tablep; entry->mode; entry++) {
  4636. if (entry->child)
  4637. sd_free_ctl_entry(&entry->child);
  4638. if (entry->proc_handler == NULL)
  4639. kfree(entry->procname);
  4640. }
  4641. kfree(*tablep);
  4642. *tablep = NULL;
  4643. }
  4644. static void
  4645. set_table_entry(struct ctl_table *entry,
  4646. const char *procname, void *data, int maxlen,
  4647. umode_t mode, proc_handler *proc_handler)
  4648. {
  4649. entry->procname = procname;
  4650. entry->data = data;
  4651. entry->maxlen = maxlen;
  4652. entry->mode = mode;
  4653. entry->proc_handler = proc_handler;
  4654. }
  4655. static struct ctl_table *
  4656. sd_alloc_ctl_domain_table(struct sched_domain *sd)
  4657. {
  4658. struct ctl_table *table = sd_alloc_ctl_entry(13);
  4659. if (table == NULL)
  4660. return NULL;
  4661. set_table_entry(&table[0], "min_interval", &sd->min_interval,
  4662. sizeof(long), 0644, proc_doulongvec_minmax);
  4663. set_table_entry(&table[1], "max_interval", &sd->max_interval,
  4664. sizeof(long), 0644, proc_doulongvec_minmax);
  4665. set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
  4666. sizeof(int), 0644, proc_dointvec_minmax);
  4667. set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
  4668. sizeof(int), 0644, proc_dointvec_minmax);
  4669. set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
  4670. sizeof(int), 0644, proc_dointvec_minmax);
  4671. set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
  4672. sizeof(int), 0644, proc_dointvec_minmax);
  4673. set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
  4674. sizeof(int), 0644, proc_dointvec_minmax);
  4675. set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
  4676. sizeof(int), 0644, proc_dointvec_minmax);
  4677. set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
  4678. sizeof(int), 0644, proc_dointvec_minmax);
  4679. set_table_entry(&table[9], "cache_nice_tries",
  4680. &sd->cache_nice_tries,
  4681. sizeof(int), 0644, proc_dointvec_minmax);
  4682. set_table_entry(&table[10], "flags", &sd->flags,
  4683. sizeof(int), 0644, proc_dointvec_minmax);
  4684. set_table_entry(&table[11], "name", sd->name,
  4685. CORENAME_MAX_SIZE, 0444, proc_dostring);
  4686. /* &table[12] is terminator */
  4687. return table;
  4688. }
  4689. static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
  4690. {
  4691. struct ctl_table *entry, *table;
  4692. struct sched_domain *sd;
  4693. int domain_num = 0, i;
  4694. char buf[32];
  4695. for_each_domain(cpu, sd)
  4696. domain_num++;
  4697. entry = table = sd_alloc_ctl_entry(domain_num + 1);
  4698. if (table == NULL)
  4699. return NULL;
  4700. i = 0;
  4701. for_each_domain(cpu, sd) {
  4702. snprintf(buf, 32, "domain%d", i);
  4703. entry->procname = kstrdup(buf, GFP_KERNEL);
  4704. entry->mode = 0555;
  4705. entry->child = sd_alloc_ctl_domain_table(sd);
  4706. entry++;
  4707. i++;
  4708. }
  4709. return table;
  4710. }
  4711. static struct ctl_table_header *sd_sysctl_header;
  4712. static void register_sched_domain_sysctl(void)
  4713. {
  4714. int i, cpu_num = num_possible_cpus();
  4715. struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
  4716. char buf[32];
  4717. WARN_ON(sd_ctl_dir[0].child);
  4718. sd_ctl_dir[0].child = entry;
  4719. if (entry == NULL)
  4720. return;
  4721. for_each_possible_cpu(i) {
  4722. snprintf(buf, 32, "cpu%d", i);
  4723. entry->procname = kstrdup(buf, GFP_KERNEL);
  4724. entry->mode = 0555;
  4725. entry->child = sd_alloc_ctl_cpu_table(i);
  4726. entry++;
  4727. }
  4728. WARN_ON(sd_sysctl_header);
  4729. sd_sysctl_header = register_sysctl_table(sd_ctl_root);
  4730. }
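/*
 * Illustrative userspace sketch (not part of this file): when the kernel is
 * built with CONFIG_SCHED_DEBUG and CONFIG_SYSCTL, the table registered above
 * appears under /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/, so the
 * per-domain tunables can be read as small text files.
 *
 *	#include <stdio.h>
 *
 *	void show_domain0_name(int cpu)
 *	{
 *		char path[128], buf[64];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/proc/sys/kernel/sched_domain/cpu%d/domain0/name", cpu);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("cpu%d domain0: %s", cpu, buf);
 *		fclose(f);
 *	}
 */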
  4731. /* may be called multiple times per register */
  4732. static void unregister_sched_domain_sysctl(void)
  4733. {
  4734. if (sd_sysctl_header)
  4735. unregister_sysctl_table(sd_sysctl_header);
  4736. sd_sysctl_header = NULL;
  4737. if (sd_ctl_dir[0].child)
  4738. sd_free_ctl_entry(&sd_ctl_dir[0].child);
  4739. }
  4740. #else
  4741. static void register_sched_domain_sysctl(void)
  4742. {
  4743. }
  4744. static void unregister_sched_domain_sysctl(void)
  4745. {
  4746. }
  4747. #endif
  4748. static void set_rq_online(struct rq *rq)
  4749. {
  4750. if (!rq->online) {
  4751. const struct sched_class *class;
  4752. cpumask_set_cpu(rq->cpu, rq->rd->online);
  4753. rq->online = 1;
  4754. for_each_class(class) {
  4755. if (class->rq_online)
  4756. class->rq_online(rq);
  4757. }
  4758. }
  4759. }
  4760. static void set_rq_offline(struct rq *rq)
  4761. {
  4762. if (rq->online) {
  4763. const struct sched_class *class;
  4764. for_each_class(class) {
  4765. if (class->rq_offline)
  4766. class->rq_offline(rq);
  4767. }
  4768. cpumask_clear_cpu(rq->cpu, rq->rd->online);
  4769. rq->online = 0;
  4770. }
  4771. }
  4772. /*
  4773. * migration_call - callback that gets triggered when a CPU is added.
  4774. * Here we can start up the necessary migration thread for the new CPU.
  4775. */
  4776. static int __cpuinit
  4777. migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  4778. {
  4779. int cpu = (long)hcpu;
  4780. unsigned long flags;
  4781. struct rq *rq = cpu_rq(cpu);
  4782. switch (action & ~CPU_TASKS_FROZEN) {
  4783. case CPU_UP_PREPARE:
  4784. rq->calc_load_update = calc_load_update;
  4785. break;
  4786. case CPU_ONLINE:
  4787. /* Update our root-domain */
  4788. raw_spin_lock_irqsave(&rq->lock, flags);
  4789. if (rq->rd) {
  4790. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  4791. set_rq_online(rq);
  4792. }
  4793. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4794. break;
  4795. #ifdef CONFIG_HOTPLUG_CPU
  4796. case CPU_DYING:
  4797. sched_ttwu_pending();
  4798. /* Update our root-domain */
  4799. raw_spin_lock_irqsave(&rq->lock, flags);
  4800. if (rq->rd) {
  4801. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  4802. set_rq_offline(rq);
  4803. }
  4804. migrate_tasks(cpu);
  4805. BUG_ON(rq->nr_running != 1); /* the migration thread */
  4806. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4807. break;
  4808. case CPU_DEAD:
  4809. calc_load_migrate(rq);
  4810. break;
  4811. #endif
  4812. }
  4813. update_max_interval();
  4814. return NOTIFY_OK;
  4815. }
  4816. /*
  4817. * Register at high priority so that task migration (migrate_all_tasks)
  4818. * happens before everything else. This has to be lower priority than
  4819. * the notifier in the perf_event subsystem, though.
  4820. */
  4821. static struct notifier_block __cpuinitdata migration_notifier = {
  4822. .notifier_call = migration_call,
  4823. .priority = CPU_PRI_MIGRATION,
  4824. };
  4825. static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
  4826. unsigned long action, void *hcpu)
  4827. {
  4828. switch (action & ~CPU_TASKS_FROZEN) {
  4829. case CPU_STARTING:
  4830. case CPU_DOWN_FAILED:
  4831. set_cpu_active((long)hcpu, true);
  4832. return NOTIFY_OK;
  4833. default:
  4834. return NOTIFY_DONE;
  4835. }
  4836. }
  4837. static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
  4838. unsigned long action, void *hcpu)
  4839. {
  4840. switch (action & ~CPU_TASKS_FROZEN) {
  4841. case CPU_DOWN_PREPARE:
  4842. set_cpu_active((long)hcpu, false);
  4843. return NOTIFY_OK;
  4844. default:
  4845. return NOTIFY_DONE;
  4846. }
  4847. }
  4848. static int __init migration_init(void)
  4849. {
  4850. void *cpu = (void *)(long)smp_processor_id();
  4851. int err;
  4852. /* Initialize migration for the boot CPU */
  4853. err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
  4854. BUG_ON(err == NOTIFY_BAD);
  4855. migration_call(&migration_notifier, CPU_ONLINE, cpu);
  4856. register_cpu_notifier(&migration_notifier);
  4857. /* Register cpu active notifiers */
  4858. cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
  4859. cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
  4860. return 0;
  4861. }
  4862. early_initcall(migration_init);
  4863. #endif
  4864. #ifdef CONFIG_SMP
  4865. static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
  4866. #ifdef CONFIG_SCHED_DEBUG
  4867. static __read_mostly int sched_debug_enabled;
  4868. static int __init sched_debug_setup(char *str)
  4869. {
  4870. sched_debug_enabled = 1;
  4871. return 0;
  4872. }
  4873. early_param("sched_debug", sched_debug_setup);
  4874. static inline bool sched_debug(void)
  4875. {
  4876. return sched_debug_enabled;
  4877. }
  4878. static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
  4879. struct cpumask *groupmask)
  4880. {
  4881. struct sched_group *group = sd->groups;
  4882. char str[256];
  4883. cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
  4884. cpumask_clear(groupmask);
  4885. printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
  4886. if (!(sd->flags & SD_LOAD_BALANCE)) {
  4887. printk("does not load-balance\n");
  4888. if (sd->parent)
  4889. printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
  4890. " has parent");
  4891. return -1;
  4892. }
  4893. printk(KERN_CONT "span %s level %s\n", str, sd->name);
  4894. if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
  4895. printk(KERN_ERR "ERROR: domain->span does not contain "
  4896. "CPU%d\n", cpu);
  4897. }
  4898. if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
  4899. printk(KERN_ERR "ERROR: domain->groups does not contain"
  4900. " CPU%d\n", cpu);
  4901. }
  4902. printk(KERN_DEBUG "%*s groups:", level + 1, "");
  4903. do {
  4904. if (!group) {
  4905. printk("\n");
  4906. printk(KERN_ERR "ERROR: group is NULL\n");
  4907. break;
  4908. }
  4909. /*
  4910. * Even though we initialize ->power to something semi-sane,
  4911. * we leave power_orig unset. This allows us to detect if
  4912. * domain iteration is still funny without causing /0 traps.
  4913. */
  4914. if (!group->sgp->power_orig) {
  4915. printk(KERN_CONT "\n");
  4916. printk(KERN_ERR "ERROR: domain->cpu_power not "
  4917. "set\n");
  4918. break;
  4919. }
  4920. if (!cpumask_weight(sched_group_cpus(group))) {
  4921. printk(KERN_CONT "\n");
  4922. printk(KERN_ERR "ERROR: empty group\n");
  4923. break;
  4924. }
  4925. if (!(sd->flags & SD_OVERLAP) &&
  4926. cpumask_intersects(groupmask, sched_group_cpus(group))) {
  4927. printk(KERN_CONT "\n");
  4928. printk(KERN_ERR "ERROR: repeated CPUs\n");
  4929. break;
  4930. }
  4931. cpumask_or(groupmask, groupmask, sched_group_cpus(group));
  4932. cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
  4933. printk(KERN_CONT " %s", str);
  4934. if (group->sgp->power != SCHED_POWER_SCALE) {
  4935. printk(KERN_CONT " (cpu_power = %d)",
  4936. group->sgp->power);
  4937. }
  4938. group = group->next;
  4939. } while (group != sd->groups);
  4940. printk(KERN_CONT "\n");
  4941. if (!cpumask_equal(sched_domain_span(sd), groupmask))
  4942. printk(KERN_ERR "ERROR: groups don't span domain->span\n");
  4943. if (sd->parent &&
  4944. !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
  4945. printk(KERN_ERR "ERROR: parent span is not a superset "
  4946. "of domain->span\n");
  4947. return 0;
  4948. }
  4949. static void sched_domain_debug(struct sched_domain *sd, int cpu)
  4950. {
  4951. int level = 0;
  4952. if (!sched_debug_enabled)
  4953. return;
  4954. if (!sd) {
  4955. printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
  4956. return;
  4957. }
  4958. printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
  4959. for (;;) {
  4960. if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
  4961. break;
  4962. level++;
  4963. sd = sd->parent;
  4964. if (!sd)
  4965. break;
  4966. }
  4967. }
  4968. #else /* !CONFIG_SCHED_DEBUG */
  4969. # define sched_domain_debug(sd, cpu) do { } while (0)
  4970. static inline bool sched_debug(void)
  4971. {
  4972. return false;
  4973. }
  4974. #endif /* CONFIG_SCHED_DEBUG */
  4975. static int sd_degenerate(struct sched_domain *sd)
  4976. {
  4977. if (cpumask_weight(sched_domain_span(sd)) == 1)
  4978. return 1;
  4979. /* Following flags need at least 2 groups */
  4980. if (sd->flags & (SD_LOAD_BALANCE |
  4981. SD_BALANCE_NEWIDLE |
  4982. SD_BALANCE_FORK |
  4983. SD_BALANCE_EXEC |
  4984. SD_SHARE_CPUPOWER |
  4985. SD_SHARE_PKG_RESOURCES)) {
  4986. if (sd->groups != sd->groups->next)
  4987. return 0;
  4988. }
  4989. /* Following flags don't use groups */
  4990. if (sd->flags & (SD_WAKE_AFFINE))
  4991. return 0;
  4992. return 1;
  4993. }
  4994. static int
  4995. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  4996. {
  4997. unsigned long cflags = sd->flags, pflags = parent->flags;
  4998. if (sd_degenerate(parent))
  4999. return 1;
  5000. if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
  5001. return 0;
  5002. /* Flags needing groups don't count if only 1 group in parent */
  5003. if (parent->groups == parent->groups->next) {
  5004. pflags &= ~(SD_LOAD_BALANCE |
  5005. SD_BALANCE_NEWIDLE |
  5006. SD_BALANCE_FORK |
  5007. SD_BALANCE_EXEC |
  5008. SD_SHARE_CPUPOWER |
  5009. SD_SHARE_PKG_RESOURCES);
  5010. if (nr_node_ids == 1)
  5011. pflags &= ~SD_SERIALIZE;
  5012. }
  5013. if (~cflags & pflags)
  5014. return 0;
  5015. return 1;
  5016. }
  5017. static void free_rootdomain(struct rcu_head *rcu)
  5018. {
  5019. struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
  5020. cpupri_cleanup(&rd->cpupri);
  5021. free_cpumask_var(rd->rto_mask);
  5022. free_cpumask_var(rd->online);
  5023. free_cpumask_var(rd->span);
  5024. kfree(rd);
  5025. }
  5026. static void rq_attach_root(struct rq *rq, struct root_domain *rd)
  5027. {
  5028. struct root_domain *old_rd = NULL;
  5029. unsigned long flags;
  5030. raw_spin_lock_irqsave(&rq->lock, flags);
  5031. if (rq->rd) {
  5032. old_rd = rq->rd;
  5033. if (cpumask_test_cpu(rq->cpu, old_rd->online))
  5034. set_rq_offline(rq);
  5035. cpumask_clear_cpu(rq->cpu, old_rd->span);
  5036. /*
5037. * If we don't want to free the old_rd yet then
  5038. * set old_rd to NULL to skip the freeing later
  5039. * in this function:
  5040. */
  5041. if (!atomic_dec_and_test(&old_rd->refcount))
  5042. old_rd = NULL;
  5043. }
  5044. atomic_inc(&rd->refcount);
  5045. rq->rd = rd;
  5046. cpumask_set_cpu(rq->cpu, rd->span);
  5047. if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
  5048. set_rq_online(rq);
  5049. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5050. if (old_rd)
  5051. call_rcu_sched(&old_rd->rcu, free_rootdomain);
  5052. }
  5053. static int init_rootdomain(struct root_domain *rd)
  5054. {
  5055. memset(rd, 0, sizeof(*rd));
  5056. if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
  5057. goto out;
  5058. if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
  5059. goto free_span;
  5060. if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
  5061. goto free_online;
  5062. if (cpupri_init(&rd->cpupri) != 0)
  5063. goto free_rto_mask;
  5064. return 0;
  5065. free_rto_mask:
  5066. free_cpumask_var(rd->rto_mask);
  5067. free_online:
  5068. free_cpumask_var(rd->online);
  5069. free_span:
  5070. free_cpumask_var(rd->span);
  5071. out:
  5072. return -ENOMEM;
  5073. }
  5074. /*
  5075. * By default the system creates a single root-domain with all cpus as
  5076. * members (mimicking the global state we have today).
  5077. */
  5078. struct root_domain def_root_domain;
  5079. static void init_defrootdomain(void)
  5080. {
  5081. init_rootdomain(&def_root_domain);
  5082. atomic_set(&def_root_domain.refcount, 1);
  5083. }
  5084. static struct root_domain *alloc_rootdomain(void)
  5085. {
  5086. struct root_domain *rd;
  5087. rd = kmalloc(sizeof(*rd), GFP_KERNEL);
  5088. if (!rd)
  5089. return NULL;
  5090. if (init_rootdomain(rd) != 0) {
  5091. kfree(rd);
  5092. return NULL;
  5093. }
  5094. return rd;
  5095. }
  5096. static void free_sched_groups(struct sched_group *sg, int free_sgp)
  5097. {
  5098. struct sched_group *tmp, *first;
  5099. if (!sg)
  5100. return;
  5101. first = sg;
  5102. do {
  5103. tmp = sg->next;
  5104. if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
  5105. kfree(sg->sgp);
  5106. kfree(sg);
  5107. sg = tmp;
  5108. } while (sg != first);
  5109. }
  5110. static void free_sched_domain(struct rcu_head *rcu)
  5111. {
  5112. struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
  5113. /*
5114. * If it's an overlapping domain it has private groups; iterate and
  5115. * nuke them all.
  5116. */
  5117. if (sd->flags & SD_OVERLAP) {
  5118. free_sched_groups(sd->groups, 1);
  5119. } else if (atomic_dec_and_test(&sd->groups->ref)) {
  5120. kfree(sd->groups->sgp);
  5121. kfree(sd->groups);
  5122. }
  5123. kfree(sd);
  5124. }
  5125. static void destroy_sched_domain(struct sched_domain *sd, int cpu)
  5126. {
  5127. call_rcu(&sd->rcu, free_sched_domain);
  5128. }
  5129. static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  5130. {
  5131. for (; sd; sd = sd->parent)
  5132. destroy_sched_domain(sd, cpu);
  5133. }
  5134. /*
  5135. * Keep a special pointer to the highest sched_domain that has
5136. * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain) for this CPU;
5137. * this allows us to avoid some pointer chasing in select_idle_sibling().
  5138. *
  5139. * Also keep a unique ID per domain (we use the first cpu number in
5140. * the cpumask of the domain); this allows us to quickly tell if
  5141. * two cpus are in the same cache domain, see cpus_share_cache().
  5142. */
  5143. DEFINE_PER_CPU(struct sched_domain *, sd_llc);
  5144. DEFINE_PER_CPU(int, sd_llc_id);
  5145. static void update_top_cache_domain(int cpu)
  5146. {
  5147. struct sched_domain *sd;
  5148. int id = cpu;
  5149. sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
  5150. if (sd)
  5151. id = cpumask_first(sched_domain_span(sd));
  5152. rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
  5153. per_cpu(sd_llc_id, cpu) = id;
  5154. }
  5155. /*
  5156. * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  5157. * hold the hotplug lock.
  5158. */
  5159. static void
  5160. cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
  5161. {
  5162. struct rq *rq = cpu_rq(cpu);
  5163. struct sched_domain *tmp;
  5164. /* Remove the sched domains which do not contribute to scheduling. */
  5165. for (tmp = sd; tmp; ) {
  5166. struct sched_domain *parent = tmp->parent;
  5167. if (!parent)
  5168. break;
  5169. if (sd_parent_degenerate(tmp, parent)) {
  5170. tmp->parent = parent->parent;
  5171. if (parent->parent)
  5172. parent->parent->child = tmp;
  5173. destroy_sched_domain(parent, cpu);
  5174. } else
  5175. tmp = tmp->parent;
  5176. }
  5177. if (sd && sd_degenerate(sd)) {
  5178. tmp = sd;
  5179. sd = sd->parent;
  5180. destroy_sched_domain(tmp, cpu);
  5181. if (sd)
  5182. sd->child = NULL;
  5183. }
  5184. sched_domain_debug(sd, cpu);
  5185. rq_attach_root(rq, rd);
  5186. tmp = rq->sd;
  5187. rcu_assign_pointer(rq->sd, sd);
  5188. destroy_sched_domains(tmp, cpu);
  5189. update_top_cache_domain(cpu);
  5190. }
  5191. /* cpus with isolated domains */
  5192. static cpumask_var_t cpu_isolated_map;
  5193. /* Setup the mask of cpus configured for isolated domains */
  5194. static int __init isolated_cpu_setup(char *str)
  5195. {
  5196. alloc_bootmem_cpumask_var(&cpu_isolated_map);
  5197. cpulist_parse(str, cpu_isolated_map);
  5198. return 1;
  5199. }
  5200. __setup("isolcpus=", isolated_cpu_setup);
  5201. static const struct cpumask *cpu_cpu_mask(int cpu)
  5202. {
  5203. return cpumask_of_node(cpu_to_node(cpu));
  5204. }
  5205. struct sd_data {
  5206. struct sched_domain **__percpu sd;
  5207. struct sched_group **__percpu sg;
  5208. struct sched_group_power **__percpu sgp;
  5209. };
  5210. struct s_data {
  5211. struct sched_domain ** __percpu sd;
  5212. struct root_domain *rd;
  5213. };
  5214. enum s_alloc {
  5215. sa_rootdomain,
  5216. sa_sd,
  5217. sa_sd_storage,
  5218. sa_none,
  5219. };
  5220. struct sched_domain_topology_level;
  5221. typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
  5222. typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
  5223. #define SDTL_OVERLAP 0x01
  5224. struct sched_domain_topology_level {
  5225. sched_domain_init_f init;
  5226. sched_domain_mask_f mask;
  5227. int flags;
  5228. int numa_level;
  5229. struct sd_data data;
  5230. };
  5231. /*
  5232. * Build an iteration mask that can exclude certain CPUs from the upwards
  5233. * domain traversal.
  5234. *
  5235. * Asymmetric node setups can result in situations where the domain tree is of
5236. * unequal depth; make sure to skip domains that already cover the entire
  5237. * range.
  5238. *
  5239. * In that case build_sched_domains() will have terminated the iteration early
  5240. * and our sibling sd spans will be empty. Domains should always include the
  5241. * cpu they're built on, so check that.
  5242. *
  5243. */
  5244. static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
  5245. {
  5246. const struct cpumask *span = sched_domain_span(sd);
  5247. struct sd_data *sdd = sd->private;
  5248. struct sched_domain *sibling;
  5249. int i;
  5250. for_each_cpu(i, span) {
  5251. sibling = *per_cpu_ptr(sdd->sd, i);
  5252. if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
  5253. continue;
  5254. cpumask_set_cpu(i, sched_group_mask(sg));
  5255. }
  5256. }
  5257. /*
5258. * Return the canonical balance cpu for this group; this is the first cpu
  5259. * of this group that's also in the iteration mask.
  5260. */
  5261. int group_balance_cpu(struct sched_group *sg)
  5262. {
  5263. return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
  5264. }
  5265. static int
  5266. build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  5267. {
  5268. struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
  5269. const struct cpumask *span = sched_domain_span(sd);
  5270. struct cpumask *covered = sched_domains_tmpmask;
  5271. struct sd_data *sdd = sd->private;
  5272. struct sched_domain *child;
  5273. int i;
  5274. cpumask_clear(covered);
  5275. for_each_cpu(i, span) {
  5276. struct cpumask *sg_span;
  5277. if (cpumask_test_cpu(i, covered))
  5278. continue;
  5279. child = *per_cpu_ptr(sdd->sd, i);
  5280. /* See the comment near build_group_mask(). */
  5281. if (!cpumask_test_cpu(i, sched_domain_span(child)))
  5282. continue;
  5283. sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
  5284. GFP_KERNEL, cpu_to_node(cpu));
  5285. if (!sg)
  5286. goto fail;
  5287. sg_span = sched_group_cpus(sg);
  5288. if (child->child) {
  5289. child = child->child;
  5290. cpumask_copy(sg_span, sched_domain_span(child));
  5291. } else
  5292. cpumask_set_cpu(i, sg_span);
  5293. cpumask_or(covered, covered, sg_span);
  5294. sg->sgp = *per_cpu_ptr(sdd->sgp, i);
  5295. if (atomic_inc_return(&sg->sgp->ref) == 1)
  5296. build_group_mask(sd, sg);
  5297. /*
  5298. * Initialize sgp->power such that even if we mess up the
  5299. * domains and no possible iteration will get us here, we won't
  5300. * die on a /0 trap.
  5301. */
  5302. sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
  5303. /*
  5304. * Make sure the first group of this domain contains the
  5305. * canonical balance cpu. Otherwise the sched_domain iteration
  5306. * breaks. See update_sg_lb_stats().
  5307. */
  5308. if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
  5309. group_balance_cpu(sg) == cpu)
  5310. groups = sg;
  5311. if (!first)
  5312. first = sg;
  5313. if (last)
  5314. last->next = sg;
  5315. last = sg;
  5316. last->next = first;
  5317. }
  5318. sd->groups = groups;
  5319. return 0;
  5320. fail:
  5321. free_sched_groups(first, 0);
  5322. return -ENOMEM;
  5323. }
  5324. static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
  5325. {
  5326. struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
  5327. struct sched_domain *child = sd->child;
  5328. if (child)
  5329. cpu = cpumask_first(sched_domain_span(child));
  5330. if (sg) {
  5331. *sg = *per_cpu_ptr(sdd->sg, cpu);
  5332. (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
  5333. atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
  5334. }
  5335. return cpu;
  5336. }
  5337. /*
  5338. * build_sched_groups will build a circular linked list of the groups
  5339. * covered by the given span, and will set each group's ->cpumask correctly,
  5340. * and ->cpu_power to 0.
  5341. *
  5342. * Assumes the sched_domain tree is fully constructed
  5343. */
  5344. static int
  5345. build_sched_groups(struct sched_domain *sd, int cpu)
  5346. {
  5347. struct sched_group *first = NULL, *last = NULL;
  5348. struct sd_data *sdd = sd->private;
  5349. const struct cpumask *span = sched_domain_span(sd);
  5350. struct cpumask *covered;
  5351. int i;
  5352. get_group(cpu, sdd, &sd->groups);
  5353. atomic_inc(&sd->groups->ref);
  5354. if (cpu != cpumask_first(sched_domain_span(sd)))
  5355. return 0;
  5356. lockdep_assert_held(&sched_domains_mutex);
  5357. covered = sched_domains_tmpmask;
  5358. cpumask_clear(covered);
  5359. for_each_cpu(i, span) {
  5360. struct sched_group *sg;
  5361. int group = get_group(i, sdd, &sg);
  5362. int j;
  5363. if (cpumask_test_cpu(i, covered))
  5364. continue;
  5365. cpumask_clear(sched_group_cpus(sg));
  5366. sg->sgp->power = 0;
  5367. cpumask_setall(sched_group_mask(sg));
  5368. for_each_cpu(j, span) {
  5369. if (get_group(j, sdd, NULL) != group)
  5370. continue;
  5371. cpumask_set_cpu(j, covered);
  5372. cpumask_set_cpu(j, sched_group_cpus(sg));
  5373. }
  5374. if (!first)
  5375. first = sg;
  5376. if (last)
  5377. last->next = sg;
  5378. last = sg;
  5379. }
  5380. last->next = first;
  5381. return 0;
  5382. }
  5383. /*
  5384. * Initialize sched groups cpu_power.
  5385. *
5386. * cpu_power indicates the capacity of a sched group, which is used while
5387. * distributing the load between different sched groups in a sched domain.
5388. * Typically cpu_power for all the groups in a sched domain will be the same unless
5389. * there are asymmetries in the topology. If there are asymmetries, the group
5390. * having more cpu_power will pick up more load compared to the group having
  5391. * less cpu_power.
  5392. */
  5393. static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  5394. {
  5395. struct sched_group *sg = sd->groups;
  5396. WARN_ON(!sd || !sg);
  5397. do {
  5398. sg->group_weight = cpumask_weight(sched_group_cpus(sg));
  5399. sg = sg->next;
  5400. } while (sg != sd->groups);
  5401. if (cpu != group_balance_cpu(sg))
  5402. return;
  5403. update_group_power(sd, cpu);
  5404. atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
  5405. }
  5406. int __weak arch_sd_sibling_asym_packing(void)
  5407. {
  5408. return 0*SD_ASYM_PACKING;
  5409. }
  5410. /*
  5411. * Initializers for schedule domains
  5412. * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  5413. */
  5414. #ifdef CONFIG_SCHED_DEBUG
  5415. # define SD_INIT_NAME(sd, type) sd->name = #type
  5416. #else
  5417. # define SD_INIT_NAME(sd, type) do { } while (0)
  5418. #endif
  5419. #define SD_INIT_FUNC(type) \
  5420. static noinline struct sched_domain * \
  5421. sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
  5422. { \
  5423. struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
  5424. *sd = SD_##type##_INIT; \
  5425. SD_INIT_NAME(sd, type); \
  5426. sd->private = &tl->data; \
  5427. return sd; \
  5428. }
  5429. SD_INIT_FUNC(CPU)
  5430. #ifdef CONFIG_SCHED_SMT
  5431. SD_INIT_FUNC(SIBLING)
  5432. #endif
  5433. #ifdef CONFIG_SCHED_MC
  5434. SD_INIT_FUNC(MC)
  5435. #endif
  5436. #ifdef CONFIG_SCHED_BOOK
  5437. SD_INIT_FUNC(BOOK)
  5438. #endif
  5439. static int default_relax_domain_level = -1;
  5440. int sched_domain_level_max;
  5441. static int __init setup_relax_domain_level(char *str)
  5442. {
  5443. if (kstrtoint(str, 0, &default_relax_domain_level))
  5444. pr_warn("Unable to set relax_domain_level\n");
  5445. return 1;
  5446. }
  5447. __setup("relax_domain_level=", setup_relax_domain_level);
  5448. static void set_domain_attribute(struct sched_domain *sd,
  5449. struct sched_domain_attr *attr)
  5450. {
  5451. int request;
  5452. if (!attr || attr->relax_domain_level < 0) {
  5453. if (default_relax_domain_level < 0)
  5454. return;
  5455. else
  5456. request = default_relax_domain_level;
  5457. } else
  5458. request = attr->relax_domain_level;
  5459. if (request < sd->level) {
  5460. /* turn off idle balance on this domain */
  5461. sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  5462. } else {
  5463. /* turn on idle balance on this domain */
  5464. sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  5465. }
  5466. }
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
                                 const struct cpumask *cpu_map)
{
    switch (what) {
    case sa_rootdomain:
        if (!atomic_read(&d->rd->refcount))
            free_rootdomain(&d->rd->rcu); /* fall through */
    case sa_sd:
        free_percpu(d->sd); /* fall through */
    case sa_sd_storage:
        __sdt_free(cpu_map); /* fall through */
    case sa_none:
        break;
    }
}

static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                                                   const struct cpumask *cpu_map)
{
    memset(d, 0, sizeof(*d));

    if (__sdt_alloc(cpu_map))
        return sa_sd_storage;
    d->sd = alloc_percpu(struct sched_domain *);
    if (!d->sd)
        return sa_sd_storage;
    d->rd = alloc_rootdomain();
    if (!d->rd)
        return sa_sd;
    return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
    struct sd_data *sdd = sd->private;

    WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
    *per_cpu_ptr(sdd->sd, cpu) = NULL;

    if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
        *per_cpu_ptr(sdd->sg, cpu) = NULL;

    if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
        *per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
static const struct cpumask *cpu_smt_mask(int cpu)
{
    return topology_thread_cpumask(cpu);
}
#endif

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
    { sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
    { sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
    { sd_init_BOOK, cpu_book_mask, },
#endif
    { sd_init_CPU, cpu_cpu_mask, },
    { NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;

#ifdef CONFIG_NUMA

static int sched_domains_numa_levels;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;

static inline int sd_local_flags(int level)
{
    if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
        return 0;

    return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
}
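/*
 * Worked example (illustrative): with RECLAIM_DISTANCE == 30 and
 * sched_domains_numa_distance[] == { 20, 30, 40 }, levels 0 and 1 keep
 * exec/fork balancing and affine wakeups across their span, while the
 * distance-40 level returns 0 and is left with periodic balancing only.
 */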
static struct sched_domain *
sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
{
    struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
    int level = tl->numa_level;
    int sd_weight = cpumask_weight(
            sched_domains_numa_masks[level][cpu_to_node(cpu)]);

    *sd = (struct sched_domain){
        .min_interval       = sd_weight,
        .max_interval       = 2*sd_weight,
        .busy_factor        = 32,
        .imbalance_pct      = 125,
        .cache_nice_tries   = 2,
        .busy_idx           = 3,
        .idle_idx           = 2,
        .newidle_idx        = 0,
        .wake_idx           = 0,
        .forkexec_idx       = 0,

        .flags              = 1*SD_LOAD_BALANCE
                            | 1*SD_BALANCE_NEWIDLE
                            | 0*SD_BALANCE_EXEC
                            | 0*SD_BALANCE_FORK
                            | 0*SD_BALANCE_WAKE
                            | 0*SD_WAKE_AFFINE
                            | 0*SD_PREFER_LOCAL
                            | 0*SD_SHARE_CPUPOWER
                            | 0*SD_SHARE_PKG_RESOURCES
                            | 1*SD_SERIALIZE
                            | 0*SD_PREFER_SIBLING
                            | sd_local_flags(level)
                            ,
        .last_balance       = jiffies,
        .balance_interval   = sd_weight,
    };
    SD_INIT_NAME(sd, NUMA);
    sd->private = &tl->data;

    /*
     * Ugly hack to pass state to sd_numa_mask()...
     */
    sched_domains_curr_level = tl->numa_level;

    return sd;
}

static const struct cpumask *sd_numa_mask(int cpu)
{
    return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
    static int done = false;
    int i, j;

    if (done)
        return;

    done = true;

    printk(KERN_WARNING "ERROR: %s\n\n", str);

    for (i = 0; i < nr_node_ids; i++) {
        printk(KERN_WARNING " ");
        for (j = 0; j < nr_node_ids; j++)
            printk(KERN_CONT "%02d ", node_distance(i, j));
        printk(KERN_CONT "\n");
    }
    printk(KERN_WARNING "\n");
}
static bool find_numa_distance(int distance)
{
    int i;

    if (distance == node_distance(0, 0))
        return true;

    for (i = 0; i < sched_domains_numa_levels; i++) {
        if (sched_domains_numa_distance[i] == distance)
            return true;
    }

    return false;
}

static void sched_init_numa(void)
{
    int next_distance, curr_distance = node_distance(0, 0);
    struct sched_domain_topology_level *tl;
    int level = 0;
    int i, j, k;

    sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
    if (!sched_domains_numa_distance)
        return;

    /*
     * O(nr_nodes^2) deduplicating selection sort -- in order to find the
     * unique distances in the node_distance() table.
     *
     * Assumes node_distance(0,j) includes all distances in
     * node_distance(i,j) in order to avoid cubic time.
     */
    next_distance = curr_distance;
    for (i = 0; i < nr_node_ids; i++) {
        for (j = 0; j < nr_node_ids; j++) {
            for (k = 0; k < nr_node_ids; k++) {
                int distance = node_distance(i, k);

                if (distance > curr_distance &&
                    (distance < next_distance ||
                     next_distance == curr_distance))
                    next_distance = distance;

                /*
                 * While not a strong assumption it would be nice to know
                 * about cases where if node A is connected to B, B is not
                 * equally connected to A.
                 */
                if (sched_debug() && node_distance(k, i) != distance)
                    sched_numa_warn("Node-distance not symmetric");

                if (sched_debug() && i && !find_numa_distance(distance))
                    sched_numa_warn("Node-0 not representative");
            }
            if (next_distance != curr_distance) {
                sched_domains_numa_distance[level++] = next_distance;
                sched_domains_numa_levels = level;
                curr_distance = next_distance;
            } else break;
        }

        /*
         * In case of sched_debug() we verify the above assumption.
         */
        if (!sched_debug())
            break;
    }
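    /*
     * Worked example (illustrative): for a 4-node box whose node_distance()
     * table is
     *
     *    10 20 20 30
     *    20 10 20 30
     *    20 20 10 30
     *    30 30 30 10
     *
     * curr_distance starts at 10 (the identity distance); the scan above
     * first finds 20, then 30, so sched_domains_numa_distance[] ends up
     * as { 20, 30 } and level == 2.
     */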
    /*
     * 'level' contains the number of unique distances, excluding the
     * identity distance node_distance(i,i).
     *
     * The sched_domains_numa_distance[] array includes the actual distance
     * numbers.
     */
    sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
    if (!sched_domains_numa_masks)
        return;

    /*
     * Now for each level, construct a mask per node which contains all
     * cpus of nodes that are that many hops away from us.
     */
    for (i = 0; i < level; i++) {
        sched_domains_numa_masks[i] =
            kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
        if (!sched_domains_numa_masks[i])
            return;

        for (j = 0; j < nr_node_ids; j++) {
            struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
            if (!mask)
                return;

            sched_domains_numa_masks[i][j] = mask;

            for (k = 0; k < nr_node_ids; k++) {
                if (node_distance(j, k) > sched_domains_numa_distance[i])
                    continue;

                cpumask_or(mask, mask, cpumask_of_node(k));
            }
        }
    }
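    /*
     * Continuing the illustrative example: with distances { 20, 30 } and
     * node 0 at distance 10/20/20/30 from nodes 0-3, the level-0 mask for
     * node 0 covers the cpus of nodes 0-2 (everything within distance 20),
     * while the level-1 (distance 30) mask for any node covers all four
     * nodes.
     */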
    tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
            sizeof(struct sched_domain_topology_level), GFP_KERNEL);
    if (!tl)
        return;

    /*
     * Copy the default topology bits..
     */
    for (i = 0; default_topology[i].init; i++)
        tl[i] = default_topology[i];

    /*
     * .. and append 'j' levels of NUMA goodness.
     */
    for (j = 0; j < level; i++, j++) {
        tl[i] = (struct sched_domain_topology_level){
            .init = sd_numa_init,
            .mask = sd_numa_mask,
            .flags = SDTL_OVERLAP,
            .numa_level = j,
        };
    }

    sched_domain_topology = tl;
}
#else
static inline void sched_init_numa(void)
{
}
#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
    struct sched_domain_topology_level *tl;
    int j;

    for (tl = sched_domain_topology; tl->init; tl++) {
        struct sd_data *sdd = &tl->data;

        sdd->sd = alloc_percpu(struct sched_domain *);
        if (!sdd->sd)
            return -ENOMEM;

        sdd->sg = alloc_percpu(struct sched_group *);
        if (!sdd->sg)
            return -ENOMEM;

        sdd->sgp = alloc_percpu(struct sched_group_power *);
        if (!sdd->sgp)
            return -ENOMEM;

        for_each_cpu(j, cpu_map) {
            struct sched_domain *sd;
            struct sched_group *sg;
            struct sched_group_power *sgp;

            sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                    GFP_KERNEL, cpu_to_node(j));
            if (!sd)
                return -ENOMEM;

            *per_cpu_ptr(sdd->sd, j) = sd;

            sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                    GFP_KERNEL, cpu_to_node(j));
            if (!sg)
                return -ENOMEM;

            sg->next = sg;

            *per_cpu_ptr(sdd->sg, j) = sg;

            sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
                    GFP_KERNEL, cpu_to_node(j));
            if (!sgp)
                return -ENOMEM;

            *per_cpu_ptr(sdd->sgp, j) = sgp;
        }
    }

    return 0;
}
static void __sdt_free(const struct cpumask *cpu_map)
{
    struct sched_domain_topology_level *tl;
    int j;

    for (tl = sched_domain_topology; tl->init; tl++) {
        struct sd_data *sdd = &tl->data;

        for_each_cpu(j, cpu_map) {
            struct sched_domain *sd;

            if (sdd->sd) {
                sd = *per_cpu_ptr(sdd->sd, j);
                if (sd && (sd->flags & SD_OVERLAP))
                    free_sched_groups(sd->groups, 0);
                kfree(*per_cpu_ptr(sdd->sd, j));
            }

            if (sdd->sg)
                kfree(*per_cpu_ptr(sdd->sg, j));
            if (sdd->sgp)
                kfree(*per_cpu_ptr(sdd->sgp, j));
        }
        free_percpu(sdd->sd);
        sdd->sd = NULL;
        free_percpu(sdd->sg);
        sdd->sg = NULL;
        free_percpu(sdd->sgp);
        sdd->sgp = NULL;
    }
}
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
        struct s_data *d, const struct cpumask *cpu_map,
        struct sched_domain_attr *attr, struct sched_domain *child,
        int cpu)
{
    struct sched_domain *sd = tl->init(tl, cpu);
    if (!sd)
        return child;

    cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
    if (child) {
        sd->level = child->level + 1;
        sched_domain_level_max = max(sched_domain_level_max, sd->level);
        child->parent = sd;
    }
    sd->child = child;
    set_domain_attribute(sd, attr);

    return sd;
}
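/*
 * Illustrative shape of the result (comment only): called once per topology
 * level for a given cpu, this links the domains bottom-up, e.g.
 *
 *    SMT (level 0) <-> MC (level 1) <-> CPU (level 2) <-> NUMA levels
 *
 * with child/parent pointers in both directions and each domain's span
 * clipped to cpu_map.
 */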
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int build_sched_domains(const struct cpumask *cpu_map,
                               struct sched_domain_attr *attr)
{
    enum s_alloc alloc_state = sa_none;
    struct sched_domain *sd;
    struct s_data d;
    int i, ret = -ENOMEM;

    alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
    if (alloc_state != sa_rootdomain)
        goto error;

    /* Set up domains for cpus specified by the cpu_map. */
    for_each_cpu(i, cpu_map) {
        struct sched_domain_topology_level *tl;

        sd = NULL;
        for (tl = sched_domain_topology; tl->init; tl++) {
            sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
            if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
                sd->flags |= SD_OVERLAP;
            if (cpumask_equal(cpu_map, sched_domain_span(sd)))
                break;
        }

        while (sd->child)
            sd = sd->child;

        *per_cpu_ptr(d.sd, i) = sd;
    }

    /* Build the groups for the domains */
    for_each_cpu(i, cpu_map) {
        for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
            sd->span_weight = cpumask_weight(sched_domain_span(sd));
            if (sd->flags & SD_OVERLAP) {
                if (build_overlap_sched_groups(sd, i))
                    goto error;
            } else {
                if (build_sched_groups(sd, i))
                    goto error;
            }
        }
    }

    /* Calculate CPU power for physical packages and nodes */
    for (i = nr_cpumask_bits-1; i >= 0; i--) {
        if (!cpumask_test_cpu(i, cpu_map))
            continue;

        for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
            claim_allocations(i, sd);
            init_sched_groups_power(i, sd);
        }
    }

    /* Attach the domains */
    rcu_read_lock();
    for_each_cpu(i, cpu_map) {
        sd = *per_cpu_ptr(d.sd, i);
        cpu_attach_domain(sd, d.rd, i);
    }
    rcu_read_unlock();

    ret = 0;
error:
    __free_domain_allocs(&d, alloc_state, cpu_map);
    return ret;
}
static cpumask_var_t *doms_cur;    /* current sched domains */
static int ndoms_cur;              /* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
                                   /* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
    return 0;
}
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
    int i;
    cpumask_var_t *doms;

    doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
    if (!doms)
        return NULL;
    for (i = 0; i < ndoms; i++) {
        if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
            free_sched_domains(doms, i);
            return NULL;
        }
    }
    return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
    unsigned int i;

    for (i = 0; i < ndoms; i++)
        free_cpumask_var(doms[i]);
    kfree(doms);
}
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
    int err;

    arch_update_cpu_topology();
    ndoms_cur = 1;
    doms_cur = alloc_sched_domains(ndoms_cur);
    if (!doms_cur)
        doms_cur = &fallback_doms;
    cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
    err = build_sched_domains(doms_cur[0], NULL);
    register_sched_domain_sysctl();

    return err;
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
    int i;

    rcu_read_lock();
    for_each_cpu(i, cpu_map)
        cpu_attach_domain(NULL, &def_root_domain, i);
    rcu_read_unlock();
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
                        struct sched_domain_attr *new, int idx_new)
{
    struct sched_domain_attr tmp;

    /* fast path */
    if (!new && !cur)
        return 1;

    tmp = SD_ATTR_INIT;
    return !memcmp(cur ? (cur + idx_cur) : &tmp,
                   new ? (new + idx_new) : &tmp,
                   sizeof(struct sched_domain_attr));
}
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap). We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_active_mask (minus
 * the isolated cpus). ndoms_new == 0 is a special case for destroying
 * existing domains, and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                             struct sched_domain_attr *dattr_new)
{
    int i, j, n;
    int new_topology;

    mutex_lock(&sched_domains_mutex);

    /* always unregister in case we don't destroy any domains */
    unregister_sched_domain_sysctl();

    /* Let architecture update cpu core mappings. */
    new_topology = arch_update_cpu_topology();

    n = doms_new ? ndoms_new : 0;

    /* Destroy deleted domains */
    for (i = 0; i < ndoms_cur; i++) {
        for (j = 0; j < n && !new_topology; j++) {
            if (cpumask_equal(doms_cur[i], doms_new[j])
                && dattrs_equal(dattr_cur, i, dattr_new, j))
                goto match1;
        }
        /* no match - a current sched domain not in new doms_new[] */
        detach_destroy_domains(doms_cur[i]);
match1:
        ;
    }

    if (doms_new == NULL) {
        ndoms_cur = 0;
        doms_new = &fallback_doms;
        cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
        WARN_ON_ONCE(dattr_new);
    }

    /* Build new domains */
    for (i = 0; i < ndoms_new; i++) {
        for (j = 0; j < ndoms_cur && !new_topology; j++) {
            if (cpumask_equal(doms_new[i], doms_cur[j])
                && dattrs_equal(dattr_new, i, dattr_cur, j))
                goto match2;
        }
        /* no match - add a new doms_new */
        build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
        ;
    }

    /* Remember the new sched domains */
    if (doms_cur != &fallback_doms)
        free_sched_domains(doms_cur, ndoms_cur);
    kfree(dattr_cur);    /* kfree(NULL) is safe */
    doms_cur = doms_new;
    dattr_cur = dattr_new;
    ndoms_cur = ndoms_new;

    register_sched_domain_sysctl();

    mutex_unlock(&sched_domains_mutex);
}
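/*
 * Worked example (illustrative): if doms_cur == { A, B } and the caller
 * passes doms_new == { B, C } with matching attributes, the first loop
 * destroys the domains in A (no match), leaves B alone (match1), and the
 * second loop builds domains only for C (B hits match2).  When the
 * architecture reports a changed topology, new_topology short-circuits
 * both inner loops, so everything is torn down and rebuilt.
 */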
static int num_cpus_frozen;    /* used to mark begin/end of suspend/resume */

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore them back to their original state upon resume anyway.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
    switch (action) {
    case CPU_ONLINE_FROZEN:
    case CPU_DOWN_FAILED_FROZEN:

        /*
         * num_cpus_frozen tracks how many CPUs are involved in the
         * suspend/resume sequence. As long as this is not the last online
         * operation in the resume sequence, just build a single sched
         * domain, ignoring cpusets.
         */
        num_cpus_frozen--;
        if (likely(num_cpus_frozen)) {
            partition_sched_domains(1, NULL, NULL);
            break;
        }

        /*
         * This is the last CPU online operation. So fall through and
         * restore the original sched domains by considering the
         * cpuset configurations.
         */

    case CPU_ONLINE:
    case CPU_DOWN_FAILED:
        cpuset_update_active_cpus(true);
        break;
    default:
        return NOTIFY_DONE;
    }
    return NOTIFY_OK;
}

static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
{
    switch (action) {
    case CPU_DOWN_PREPARE:
        cpuset_update_active_cpus(false);
        break;
    case CPU_DOWN_PREPARE_FROZEN:
        num_cpus_frozen++;
        partition_sched_domains(1, NULL, NULL);
        break;
    default:
        return NOTIFY_DONE;
    }
    return NOTIFY_OK;
}
void __init sched_init_smp(void)
{
    cpumask_var_t non_isolated_cpus;

    alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
    alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

    sched_init_numa();

    get_online_cpus();
    mutex_lock(&sched_domains_mutex);
    init_sched_domains(cpu_active_mask);
    cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
    if (cpumask_empty(non_isolated_cpus))
        cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
    mutex_unlock(&sched_domains_mutex);
    put_online_cpus();

    hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
    hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);

    /* RT runtime code needs to handle some hotplug events */
    hotcpu_notifier(update_runtime, 0);

    init_hrtick();

    /* Move init over to a non-isolated CPU */
    if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
        BUG();
    sched_init_granularity();
    free_cpumask_var(non_isolated_cpus);

    init_sched_rt_class();
}
#else
void __init sched_init_smp(void)
{
    sched_init_granularity();
}
#endif /* CONFIG_SMP */
const_debug unsigned int sysctl_timer_migration = 1;

int in_sched_functions(unsigned long addr)
{
    return in_lock_functions(addr) ||
        (addr >= (unsigned long)__sched_text_start
        && addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
struct task_group root_task_group;
LIST_HEAD(task_groups);
#endif

DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
void __init sched_init(void)
{
    int i, j;
    unsigned long alloc_size = 0, ptr;

#ifdef CONFIG_FAIR_GROUP_SCHED
    alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
    alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
    alloc_size += num_possible_cpus() * cpumask_size();
#endif
    if (alloc_size) {
        ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
        root_task_group.se = (struct sched_entity **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);

        root_task_group.cfs_rq = (struct cfs_rq **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
        root_task_group.rt_se = (struct sched_rt_entity **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);

        root_task_group.rt_rq = (struct rt_rq **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
        for_each_possible_cpu(i) {
            per_cpu(load_balance_tmpmask, i) = (void *)ptr;
            ptr += cpumask_size();
        }
#endif /* CONFIG_CPUMASK_OFFSTACK */
    }

#ifdef CONFIG_SMP
    init_defrootdomain();
#endif

    init_rt_bandwidth(&def_rt_bandwidth,
            global_rt_period(), global_rt_runtime());

#ifdef CONFIG_RT_GROUP_SCHED
    init_rt_bandwidth(&root_task_group.rt_bandwidth,
            global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
    list_add(&root_task_group.list, &task_groups);
    INIT_LIST_HEAD(&root_task_group.children);
    INIT_LIST_HEAD(&root_task_group.siblings);
    autogroup_init(&init_task);

#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT
    root_cpuacct.cpustat = &kernel_cpustat;
    root_cpuacct.cpuusage = alloc_percpu(u64);
    /* Too early, not expected to fail */
    BUG_ON(!root_cpuacct.cpuusage);
#endif
    for_each_possible_cpu(i) {
        struct rq *rq;

        rq = cpu_rq(i);
        raw_spin_lock_init(&rq->lock);
        rq->nr_running = 0;
        rq->calc_load_active = 0;
        rq->calc_load_update = jiffies + LOAD_FREQ;
        init_cfs_rq(&rq->cfs);
        init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
        root_task_group.shares = ROOT_TASK_GROUP_LOAD;
        INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
        /*
         * How much cpu bandwidth does root_task_group get?
         *
         * In case of task-groups formed through the cgroup filesystem,
         * it gets 100% of the cpu resources in the system. This overall
         * system cpu resource is divided among the tasks of
         * root_task_group and its child task-groups in a fair manner,
         * based on each entity's (task or task-group's) weight
         * (se->load.weight).
         *
         * In other words, if root_task_group has 10 tasks of weight
         * 1024 and two child groups A0 and A1 (of weight 1024 each),
         * then A0's share of the cpu resource is:
         *
         *    A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
         *
         * We achieve this by letting root_task_group's tasks sit
         * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
         */
        init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
        init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

        rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
        INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
        init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif

        for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
            rq->cpu_load[j] = 0;

        rq->last_load_update_tick = jiffies;

#ifdef CONFIG_SMP
        rq->sd = NULL;
        rq->rd = NULL;
        rq->cpu_power = SCHED_POWER_SCALE;
        rq->post_schedule = 0;
        rq->active_balance = 0;
        rq->next_balance = jiffies;
        rq->push_cpu = 0;
        rq->cpu = i;
        rq->online = 0;
        rq->idle_stamp = 0;
        rq->avg_idle = 2*sysctl_sched_migration_cost;

        INIT_LIST_HEAD(&rq->cfs_tasks);

        rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ
        rq->nohz_flags = 0;
#endif
#endif
        init_rq_hrtick(rq);
        atomic_set(&rq->nr_iowait, 0);
    }
    set_load_weight(&init_task);

#ifdef CONFIG_PREEMPT_NOTIFIERS
    INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

#ifdef CONFIG_RT_MUTEXES
    plist_head_init(&init_task.pi_waiters);
#endif

    /*
     * The boot idle thread does lazy MMU switching as well:
     */
    atomic_inc(&init_mm.mm_count);
    enter_lazy_tlb(&init_mm, current);

    /*
     * Make us the idle thread. Technically, schedule() should not be
     * called from this thread, however somewhere below it might be,
     * but because we are the idle thread, we just pick up running again
     * when this runqueue becomes "idle".
     */
    init_idle(current, smp_processor_id());

    calc_load_update = jiffies + LOAD_FREQ;

    /*
     * During early bootup we pretend to be a normal task:
     */
    current->sched_class = &fair_sched_class;

#ifdef CONFIG_SMP
    zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
    /* May be allocated at isolcpus cmdline parse time */
    if (cpu_isolated_map == NULL)
        zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
    idle_thread_set_boot_cpu();
#endif
    init_sched_fair_class();

    scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
    int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

    return (nested == preempt_offset);
}

void __might_sleep(const char *file, int line, int preempt_offset)
{
    static unsigned long prev_jiffy;    /* ratelimiting */

    rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
    if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
        system_state != SYSTEM_RUNNING || oops_in_progress)
        return;
    if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
        return;
    prev_jiffy = jiffies;

    printk(KERN_ERR
        "BUG: sleeping function called from invalid context at %s:%d\n",
            file, line);
    printk(KERN_ERR
        "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
            in_atomic(), irqs_disabled(),
            current->pid, current->comm);

    debug_show_held_locks(current);
    if (irqs_disabled())
        print_irqtrace_events(current);
    dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
    const struct sched_class *prev_class = p->sched_class;
    int old_prio = p->prio;
    int on_rq;

    on_rq = p->on_rq;
    if (on_rq)
        dequeue_task(rq, p, 0);
    __setscheduler(rq, p, SCHED_NORMAL, 0);
    if (on_rq) {
        enqueue_task(rq, p, 0);
        resched_task(rq->curr);
    }

    check_class_changed(rq, p, prev_class, old_prio);
}

void normalize_rt_tasks(void)
{
    struct task_struct *g, *p;
    unsigned long flags;
    struct rq *rq;

    read_lock_irqsave(&tasklist_lock, flags);
    do_each_thread(g, p) {
        /*
         * Only normalize user tasks:
         */
        if (!p->mm)
            continue;

        p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
        p->se.statistics.wait_start = 0;
        p->se.statistics.sleep_start = 0;
        p->se.statistics.block_start = 0;
#endif

        if (!rt_task(p)) {
            /*
             * Renice negative nice level userspace
             * tasks back to 0:
             */
            if (TASK_NICE(p) < 0 && p->mm)
                set_user_nice(p, 0);
            continue;
        }

        raw_spin_lock(&p->pi_lock);
        rq = __task_rq_lock(p);

        normalize_task(rq, p);

        __task_rq_unlock(rq);
        raw_spin_unlock(&p->pi_lock);
    } while_each_thread(g, p);

    read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
    return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPU's synchronized, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
    cpu_curr(cpu) = p;
}

#endif
#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

static void free_sched_group(struct task_group *tg)
{
    free_fair_sched_group(tg);
    free_rt_sched_group(tg);
    autogroup_free(tg);
    kfree(tg);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
    struct task_group *tg;
    unsigned long flags;

    tg = kzalloc(sizeof(*tg), GFP_KERNEL);
    if (!tg)
        return ERR_PTR(-ENOMEM);

    if (!alloc_fair_sched_group(tg, parent))
        goto err;

    if (!alloc_rt_sched_group(tg, parent))
        goto err;

    spin_lock_irqsave(&task_group_lock, flags);
    list_add_rcu(&tg->list, &task_groups);

    WARN_ON(!parent); /* root should already exist */

    tg->parent = parent;
    INIT_LIST_HEAD(&tg->children);
    list_add_rcu(&tg->siblings, &parent->children);
    spin_unlock_irqrestore(&task_group_lock, flags);

    return tg;

err:
    free_sched_group(tg);
    return ERR_PTR(-ENOMEM);
}
/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
    /* now it should be safe to free those cfs_rqs */
    free_sched_group(container_of(rhp, struct task_group, rcu));
}

/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
    unsigned long flags;
    int i;

    /* end participation in shares distribution */
    for_each_possible_cpu(i)
        unregister_fair_sched_group(tg, i);

    spin_lock_irqsave(&task_group_lock, flags);
    list_del_rcu(&tg->list);
    list_del_rcu(&tg->siblings);
    spin_unlock_irqrestore(&task_group_lock, flags);

    /* wait for possible concurrent references to cfs_rqs to complete */
    call_rcu(&tg->rcu, free_sched_group_rcu);
}

/*
 * Change task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
    struct task_group *tg;
    int on_rq, running;
    unsigned long flags;
    struct rq *rq;

    rq = task_rq_lock(tsk, &flags);

    running = task_current(rq, tsk);
    on_rq = tsk->on_rq;

    if (on_rq)
        dequeue_task(rq, tsk, 0);
    if (unlikely(running))
        tsk->sched_class->put_prev_task(rq, tsk);

    tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
                lockdep_is_held(&tsk->sighand->siglock)),
            struct task_group, css);
    tg = autogroup_task_group(tsk, tg);
    tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
    if (tsk->sched_class->task_move_group)
        tsk->sched_class->task_move_group(tsk, on_rq);
    else
#endif
        set_task_rq(tsk, task_cpu(tsk));

    if (unlikely(running))
        tsk->sched_class->set_curr_task(rq);
    if (on_rq)
        enqueue_task(rq, tsk, 0);

    task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
static unsigned long to_ratio(u64 period, u64 runtime)
{
    if (runtime == RUNTIME_INF)
        return 1ULL << 20;

    return div64_u64(runtime << 20, period);
}
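/*
 * Worked example (illustrative): the ratio is runtime/period in 20-bit
 * fixed point, so a full CPU is 1ULL << 20 == 1048576.  For
 * period == 1000000us and runtime == 500000us:
 *
 *    to_ratio(1000000, 500000) == (500000 << 20) / 1000000 == 524288
 *
 * i.e. half a CPU.
 */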
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
    struct task_struct *g, *p;

    do_each_thread(g, p) {
        if (rt_task(p) && task_rq(p)->rt.tg == tg)
            return 1;
    } while_each_thread(g, p);

    return 0;
}
struct rt_schedulable_data {
    struct task_group *tg;
    u64 rt_period;
    u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
    struct rt_schedulable_data *d = data;
    struct task_group *child;
    unsigned long total, sum = 0;
    u64 period, runtime;

    period = ktime_to_ns(tg->rt_bandwidth.rt_period);
    runtime = tg->rt_bandwidth.rt_runtime;

    if (tg == d->tg) {
        period = d->rt_period;
        runtime = d->rt_runtime;
    }

    /*
     * Cannot have more runtime than the period.
     */
    if (runtime > period && runtime != RUNTIME_INF)
        return -EINVAL;

    /*
     * Ensure we don't starve existing RT tasks.
     */
    if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
        return -EBUSY;

    total = to_ratio(period, runtime);

    /*
     * Nobody can have more than the global setting allows.
     */
    if (total > to_ratio(global_rt_period(), global_rt_runtime()))
        return -EINVAL;

    /*
     * The sum of our children's runtime should not exceed our own.
     */
    list_for_each_entry_rcu(child, &tg->children, siblings) {
        period = ktime_to_ns(child->rt_bandwidth.rt_period);
        runtime = child->rt_bandwidth.rt_runtime;

        if (child == d->tg) {
            period = d->rt_period;
            runtime = d->rt_runtime;
        }

        sum += to_ratio(period, runtime);
    }

    if (sum > total)
        return -EINVAL;

    return 0;
}
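/*
 * Worked example (illustrative): with the usual global limits
 * (rt_period == 1s, rt_runtime == 950ms, i.e. 95%), giving a group
 * runtime == 500ms over period == 1s makes total == to_ratio(1s, 500ms)
 * ~= 0.5 * 2^20, which passes the global check; two children asking for
 * 300ms each would then sum to ~0.6 * 2^20 > total and fail with -EINVAL.
 */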
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
    int ret;

    struct rt_schedulable_data data = {
        .tg = tg,
        .rt_period = period,
        .rt_runtime = runtime,
    };

    rcu_read_lock();
    ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
    rcu_read_unlock();

    return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
        u64 rt_period, u64 rt_runtime)
{
    int i, err = 0;

    mutex_lock(&rt_constraints_mutex);
    read_lock(&tasklist_lock);
    err = __rt_schedulable(tg, rt_period, rt_runtime);
    if (err)
        goto unlock;

    raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
    tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
    tg->rt_bandwidth.rt_runtime = rt_runtime;

    for_each_possible_cpu(i) {
        struct rt_rq *rt_rq = tg->rt_rq[i];

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_runtime;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
    raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
    read_unlock(&tasklist_lock);
    mutex_unlock(&rt_constraints_mutex);

    return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
    u64 rt_runtime, rt_period;

    rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
    rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
    if (rt_runtime_us < 0)
        rt_runtime = RUNTIME_INF;

    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
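/*
 * Usage example (illustrative): echoing 950000 into a group's
 * cpu.rt_runtime_us file arrives here as rt_runtime_us == 950000, i.e.
 * 950ms of runtime per (default) 1s period == 95% of a CPU; a negative
 * value maps to RUNTIME_INF, i.e. no limit.
 */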
long sched_group_rt_runtime(struct task_group *tg)
{
    u64 rt_runtime_us;

    if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
        return -1;

    rt_runtime_us = tg->rt_bandwidth.rt_runtime;
    do_div(rt_runtime_us, NSEC_PER_USEC);
    return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
    u64 rt_runtime, rt_period;

    rt_period = (u64)rt_period_us * NSEC_PER_USEC;
    rt_runtime = tg->rt_bandwidth.rt_runtime;

    if (rt_period == 0)
        return -EINVAL;

    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
    u64 rt_period_us;

    rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
    do_div(rt_period_us, NSEC_PER_USEC);
    return rt_period_us;
}
static int sched_rt_global_constraints(void)
{
    u64 runtime, period;
    int ret = 0;

    if (sysctl_sched_rt_period <= 0)
        return -EINVAL;

    runtime = global_rt_runtime();
    period = global_rt_period();

    /*
     * Sanity check on the sysctl variables.
     */
    if (runtime > period && runtime != RUNTIME_INF)
        return -EINVAL;

    mutex_lock(&rt_constraints_mutex);
    read_lock(&tasklist_lock);
    ret = __rt_schedulable(NULL, 0, 0);
    read_unlock(&tasklist_lock);
    mutex_unlock(&rt_constraints_mutex);

    return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
    /* Don't accept realtime tasks when there is no way for them to run */
    if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
        return 0;

    return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
    unsigned long flags;
    int i;

    if (sysctl_sched_rt_period <= 0)
        return -EINVAL;

    /*
     * There's always some RT tasks in the root group
     * -- migration, kstopmachine etc..
     */
    if (sysctl_sched_rt_runtime == 0)
        return -EBUSY;

    raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
    for_each_possible_cpu(i) {
        struct rt_rq *rt_rq = &cpu_rq(i)->rt;

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = global_rt_runtime();
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
    raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

    return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp,
        loff_t *ppos)
{
    int ret;
    int old_period, old_runtime;
    static DEFINE_MUTEX(mutex);

    mutex_lock(&mutex);
    old_period = sysctl_sched_rt_period;
    old_runtime = sysctl_sched_rt_runtime;

    ret = proc_dointvec(table, write, buffer, lenp, ppos);

    if (!ret && write) {
        ret = sched_rt_global_constraints();
        if (ret) {
            sysctl_sched_rt_period = old_period;
            sysctl_sched_rt_runtime = old_runtime;
        } else {
            def_rt_bandwidth.rt_runtime = global_rt_runtime();
            def_rt_bandwidth.rt_period =
                ns_to_ktime(global_rt_period());
        }
    }
    mutex_unlock(&mutex);

    return ret;
}
#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
    return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
                        struct task_group, css);
}

static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
{
    struct task_group *tg, *parent;

    if (!cgrp->parent) {
        /* This is early initialization for the top cgroup */
        return &root_task_group.css;
    }

    parent = cgroup_tg(cgrp->parent);
    tg = sched_create_group(parent);
    if (IS_ERR(tg))
        return ERR_PTR(-ENOMEM);

    return &tg->css;
}

static void cpu_cgroup_destroy(struct cgroup *cgrp)
{
    struct task_group *tg = cgroup_tg(cgrp);

    sched_destroy_group(tg);
}

static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
{
    struct task_struct *task;

    cgroup_taskset_for_each(task, cgrp, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
        if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
            return -EINVAL;
#else
        /* We don't support RT-tasks being in separate groups */
        if (task->sched_class != &fair_sched_class)
            return -EINVAL;
#endif
    }
    return 0;
}

static void cpu_cgroup_attach(struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
{
    struct task_struct *task;

    cgroup_taskset_for_each(task, cgrp, tset)
        sched_move_task(task);
}
static void
cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
        struct task_struct *task)
{
    /*
     * cgroup_exit() is called in the copy_process() failure path.
     * Ignore this case since the task hasn't run yet, this avoids
     * trying to poke a half-freed task state from generic code.
     */
    if (!(task->flags & PF_EXITING))
        return;

    sched_move_task(task);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
                                u64 shareval)
{
    return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
    struct task_group *tg = cgroup_tg(cgrp);

    return (u64) scale_load_down(tg->shares);
}
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
    int i, ret = 0, runtime_enabled, runtime_was_enabled;
    struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

    if (tg == &root_task_group)
        return -EINVAL;

    /*
     * Ensure we have at least some amount of bandwidth every period. This
     * is to prevent reaching a state of large arrears when throttled via
     * entity_tick() resulting in prolonged exit starvation.
     */
    if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
        return -EINVAL;

    /*
     * Likewise, bound things on the other side by preventing insane quota
     * periods. This also allows us to normalize in computing quota
     * feasibility.
     */
    if (period > max_cfs_quota_period)
        return -EINVAL;

    mutex_lock(&cfs_constraints_mutex);
    ret = __cfs_schedulable(tg, period, quota);
    if (ret)
        goto out_unlock;

    runtime_enabled = quota != RUNTIME_INF;
    runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
    account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
    raw_spin_lock_irq(&cfs_b->lock);
    cfs_b->period = ns_to_ktime(period);
    cfs_b->quota = quota;

    __refill_cfs_bandwidth_runtime(cfs_b);
    /* restart the period timer (if active) to handle new period expiry */
    if (runtime_enabled && cfs_b->timer_active) {
        /* force a reprogram */
        cfs_b->timer_active = 0;
        __start_cfs_bandwidth(cfs_b);
    }
    raw_spin_unlock_irq(&cfs_b->lock);

    for_each_possible_cpu(i) {
        struct cfs_rq *cfs_rq = tg->cfs_rq[i];
        struct rq *rq = cfs_rq->rq;

        raw_spin_lock_irq(&rq->lock);
        cfs_rq->runtime_enabled = runtime_enabled;
        cfs_rq->runtime_remaining = 0;

        if (cfs_rq->throttled)
            unthrottle_cfs_rq(cfs_rq);
        raw_spin_unlock_irq(&rq->lock);
    }
out_unlock:
    mutex_unlock(&cfs_constraints_mutex);

    return ret;
}
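/*
 * Usage example (illustrative): setting cpu.cfs_period_us to 100000 and
 * cpu.cfs_quota_us to 50000 arrives here as period == 100ms and
 * quota == 50ms in nanoseconds, i.e. the group may consume at most half a
 * CPU per period.  Both values must be at least min_cfs_quota_period (1ms)
 * and the period at most max_cfs_quota_period (1s).
 */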
int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
    u64 quota, period;

    period = ktime_to_ns(tg->cfs_bandwidth.period);
    if (cfs_quota_us < 0)
        quota = RUNTIME_INF;
    else
        quota = (u64)cfs_quota_us * NSEC_PER_USEC;

    return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_quota(struct task_group *tg)
{
    u64 quota_us;

    if (tg->cfs_bandwidth.quota == RUNTIME_INF)
        return -1;

    quota_us = tg->cfs_bandwidth.quota;
    do_div(quota_us, NSEC_PER_USEC);

    return quota_us;
}

int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
    u64 quota, period;

    period = (u64)cfs_period_us * NSEC_PER_USEC;
    quota = tg->cfs_bandwidth.quota;

    return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_period(struct task_group *tg)
{
    u64 cfs_period_us;

    cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
    do_div(cfs_period_us, NSEC_PER_USEC);

    return cfs_period_us;
}

static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
{
    return tg_get_cfs_quota(cgroup_tg(cgrp));
}

static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
                                   s64 cfs_quota_us)
{
    return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
    return tg_get_cfs_period(cgroup_tg(cgrp));
}

static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
                                    u64 cfs_period_us)
{
    return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}
struct cfs_schedulable_data {
    struct task_group *tg;
    u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
                               struct cfs_schedulable_data *d)
{
    u64 quota, period;

    if (tg == d->tg) {
        period = d->period;
        quota = d->quota;
    } else {
        period = tg_get_cfs_period(tg);
        quota = tg_get_cfs_quota(tg);
    }

    /* note: these should typically be equivalent */
    if (quota == RUNTIME_INF || quota == -1)
        return RUNTIME_INF;

    return to_ratio(period, quota);
}
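/*
 * Worked example (illustrative): quota == 50000us over period == 100000us
 * normalizes to to_ratio(100000, 50000) ~= 0.5 * 2^20, the same value a
 * 500ms/1s configuration would yield; this is what makes quotas with
 * different periods comparable in the hierarchy walk below.
 */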
static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
    struct cfs_schedulable_data *d = data;
    struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
    s64 quota = 0, parent_quota = -1;

    if (!tg->parent) {
        quota = RUNTIME_INF;
    } else {
        struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

        quota = normalize_cfs_quota(tg, d);
        parent_quota = parent_b->hierarchal_quota;

        /*
         * ensure max(child_quota) <= parent_quota, inherit when no
         * limit is set
         */
        if (quota == RUNTIME_INF)
            quota = parent_quota;
        else if (parent_quota != RUNTIME_INF && quota > parent_quota)
            return -EINVAL;
    }
    cfs_b->hierarchal_quota = quota;

    return 0;
}
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
    int ret;
    struct cfs_schedulable_data data = {
        .tg = tg,
        .period = period,
        .quota = quota,
    };

    if (quota != RUNTIME_INF) {
        do_div(data.period, NSEC_PER_USEC);
        do_div(data.quota, NSEC_PER_USEC);
    }

    rcu_read_lock();
    ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
    rcu_read_unlock();

    return ret;
}

static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
        struct cgroup_map_cb *cb)
{
    struct task_group *tg = cgroup_tg(cgrp);
    struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

    cb->fill(cb, "nr_periods", cfs_b->nr_periods);
    cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
    cb->fill(cb, "throttled_time", cfs_b->throttled_time);

    return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
                                s64 val)
{
    return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
    return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
        u64 rt_period_us)
{
    return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
    return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
    {
        .name = "shares",
        .read_u64 = cpu_shares_read_u64,
        .write_u64 = cpu_shares_write_u64,
    },
#endif
#ifdef CONFIG_CFS_BANDWIDTH
    {
        .name = "cfs_quota_us",
        .read_s64 = cpu_cfs_quota_read_s64,
        .write_s64 = cpu_cfs_quota_write_s64,
    },
    {
        .name = "cfs_period_us",
        .read_u64 = cpu_cfs_period_read_u64,
        .write_u64 = cpu_cfs_period_write_u64,
    },
    {
        .name = "stat",
        .read_map = cpu_stats_show,
    },
#endif
#ifdef CONFIG_RT_GROUP_SCHED
    {
        .name = "rt_runtime_us",
        .read_s64 = cpu_rt_runtime_read,
        .write_s64 = cpu_rt_runtime_write,
    },
    {
        .name = "rt_period_us",
        .read_u64 = cpu_rt_period_read_uint,
        .write_u64 = cpu_rt_period_write_uint,
    },
#endif
    { }    /* terminate */
};

struct cgroup_subsys cpu_cgroup_subsys = {
    .name          = "cpu",
    .create        = cpu_cgroup_create,
    .destroy       = cpu_cgroup_destroy,
    .can_attach    = cpu_cgroup_can_attach,
    .attach        = cpu_cgroup_attach,
    .exit          = cpu_cgroup_exit,
    .subsys_id     = cpu_cgroup_subsys_id,
    .base_cftypes  = cpu_files,
    .early_init    = 1,
};
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
{
	struct cpuacct *ca;

	if (!cgrp->parent)
		return &root_cpuacct.css;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	ca->cpustat = alloc_percpu(struct kernel_cpustat);
	if (!ca->cpustat)
		goto out_free_cpuusage;

	return &ca->css;

out_free_cpuusage:
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_destroy(struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);

	free_percpu(ca->cpustat);
	free_percpu(ca->cpuusage);
	kfree(ca);
}
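/* read one cpu's cumulative usage counter for a group */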
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}
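/* write one cpu's usage counter for a group; used to reset accounting */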
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}
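/*
 * Reset a group's usage on all cpus. Only a write of 0 is accepted;
 * any other value fails with -EINVAL.
 */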
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}
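/* print a group's per-cpu usage as one space-separated line */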
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}
static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};
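/*
 * Report a group's accumulated user and system time. "user" includes
 * nice time, "system" includes irq and softirq time; both are
 * converted from cputime to clock ticks before being reported.
 */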
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int cpu;
	s64 val = 0;

	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_USER];
		val += kcpustat->cpustat[CPUTIME_NICE];
	}
	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

	val = 0;
	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_SYSTEM];
		val += kcpustat->cpustat[CPUTIME_IRQ];
		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
	}
	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

	return 0;
}
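/* control files created in every cpuacct cgroup directory */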
static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
	{ }	/* terminate */
};
/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = parent_ca(ca)) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}
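/* register the cpuacct subsystem with the cgroup core */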
struct cgroup_subsys cpuacct_subsys = {
	.name		= "cpuacct",
	.create		= cpuacct_create,
	.destroy	= cpuacct_destroy,
	.subsys_id	= cpuacct_subsys_id,
	.base_cftypes	= files,
};
#endif	/* CONFIG_CGROUP_CPUACCT */