/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
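/*
 * Worked example of the mappings above (illustrative only, assuming the
 * usual MAX_RT_PRIO == 100 and MAX_PRIO == 140 from the headers):
 *
 *	NICE_TO_PRIO(-20) == 100	USER_PRIO(100) ==  0
 *	NICE_TO_PRIO(  0) == 120	USER_PRIO(120) == 20
 *	NICE_TO_PRIO( 19) == 139	USER_PRIO(139) == 39
 *
 * so static priorities 100..139 cover the whole nice range and
 * MAX_USER_PRIO evaluates to 40.
 */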
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
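/*
 * For illustration: DEF_TIMESLICE is expressed in jiffies, so with
 * HZ == 1000 it evaluates to 100 ticks and with HZ == 250 to 25 ticks,
 * both corresponding to the same 100 msec SCHED_RR timeslice.
 */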
/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
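/*
 * Note on the handler above: hrtimer_forward() pushes the timer's expiry
 * forward in whole rt_period steps until it lies in the future, returning
 * how many periods were skipped. The loop hands that count to
 * do_sched_rt_period_timer() (defined later in this file), which refreshes
 * the RT runtime budgets, and the handler re-arms the timer
 * (HRTIMER_RESTART) unless that pass reported everything idle.
 */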
static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif	/* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or thread migration code) must acquire the locks
 * in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct task_struct *wake_list;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
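/*
 * Illustrative use of the iterator above (a sketch, not code from this
 * file): walk a CPU's domain hierarchy from the lowest level upwards,
 * inside a preempt-disabled section as required by the comment above:
 *
 *	struct sched_domain *sd;
 *
 *	preempt_disable();
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			... inspect or balance this level ...
 *	}
 *	preempt_enable();
 */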
#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * p->pi_lock and rq->lock because cpu_cgroup_attach() holds those locks
 * for each task it moves into the cgroup. Therefore by holding either of
 * those locks, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
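/*
 * Note: rq->clock advances by the raw sched_clock_cpu() delta, while
 * update_rq_clock_task() (defined further down) maintains rq->clock_task,
 * which, under CONFIG_IRQ_TIME_ACCOUNTING / CONFIG_PARAVIRT, backs out
 * interrupt and steal time so task runtime is not charged for them.
 * skip_clock_update is set elsewhere in this file when an update would be
 * redundant (a reschedule is imminent), so the early return above avoids
 * a useless back-to-back clock read.
 */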
  555. /*
  556. * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  557. */
  558. #ifdef CONFIG_SCHED_DEBUG
  559. # define const_debug __read_mostly
  560. #else
  561. # define const_debug static const
  562. #endif
  563. /**
  564. * runqueue_is_locked - Returns true if the current cpu runqueue is locked
  565. * @cpu: the processor in question.
  566. *
  567. * This interface allows printk to be called with the runqueue lock
  568. * held and know whether or not it is OK to wake up the klogd.
  569. */
  570. int runqueue_is_locked(int cpu)
  571. {
  572. return raw_spin_is_locked(&cpu_rq(cpu)->lock);
  573. }
  574. /*
  575. * Debugging: various feature bits
  576. */
  577. #define SCHED_FEAT(name, enabled) \
  578. __SCHED_FEAT_##name ,
  579. enum {
  580. #include "sched_features.h"
  581. };
  582. #undef SCHED_FEAT
  583. #define SCHED_FEAT(name, enabled) \
  584. (1UL << __SCHED_FEAT_##name) * enabled |
  585. const_debug unsigned int sysctl_sched_features =
  586. #include "sched_features.h"
  587. 0;
  588. #undef SCHED_FEAT
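/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the two inclusions of "sched_features.h" above are an X-macro pattern.
 * If the header contained, hypothetically,
 *
 *	SCHED_FEAT(FOO, 1)
 *	SCHED_FEAT(BAR, 0)
 *
 * then the first pass would expand to the enum constants __SCHED_FEAT_FOO = 0
 * and __SCHED_FEAT_BAR = 1, and the second pass would expand to
 *
 *	(1UL << __SCHED_FEAT_FOO) * 1 | (1UL << __SCHED_FEAT_BAR) * 0 | 0
 *
 * so sysctl_sched_features starts out with exactly the default-enabled bits set.
 */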
  589. #ifdef CONFIG_SCHED_DEBUG
  590. #define SCHED_FEAT(name, enabled) \
  591. #name ,
  592. static __read_mostly char *sched_feat_names[] = {
  593. #include "sched_features.h"
  594. NULL
  595. };
  596. #undef SCHED_FEAT
  597. static int sched_feat_show(struct seq_file *m, void *v)
  598. {
  599. int i;
  600. for (i = 0; sched_feat_names[i]; i++) {
  601. if (!(sysctl_sched_features & (1UL << i)))
  602. seq_puts(m, "NO_");
  603. seq_printf(m, "%s ", sched_feat_names[i]);
  604. }
  605. seq_puts(m, "\n");
  606. return 0;
  607. }
  608. static ssize_t
  609. sched_feat_write(struct file *filp, const char __user *ubuf,
  610. size_t cnt, loff_t *ppos)
  611. {
  612. char buf[64];
  613. char *cmp;
  614. int neg = 0;
  615. int i;
  616. if (cnt > 63)
  617. cnt = 63;
  618. if (copy_from_user(&buf, ubuf, cnt))
  619. return -EFAULT;
  620. buf[cnt] = 0;
  621. cmp = strstrip(buf);
  622. if (strncmp(cmp, "NO_", 3) == 0) {
  623. neg = 1;
  624. cmp += 3;
  625. }
  626. for (i = 0; sched_feat_names[i]; i++) {
  627. if (strcmp(cmp, sched_feat_names[i]) == 0) {
  628. if (neg)
  629. sysctl_sched_features &= ~(1UL << i);
  630. else
  631. sysctl_sched_features |= (1UL << i);
  632. break;
  633. }
  634. }
  635. if (!sched_feat_names[i])
  636. return -EINVAL;
  637. *ppos += cnt;
  638. return cnt;
  639. }
  640. static int sched_feat_open(struct inode *inode, struct file *filp)
  641. {
  642. return single_open(filp, sched_feat_show, NULL);
  643. }
  644. static const struct file_operations sched_feat_fops = {
  645. .open = sched_feat_open,
  646. .write = sched_feat_write,
  647. .read = seq_read,
  648. .llseek = seq_lseek,
  649. .release = single_release,
  650. };
  651. static __init int sched_init_debug(void)
  652. {
  653. debugfs_create_file("sched_features", 0644, NULL, NULL,
  654. &sched_feat_fops);
  655. return 0;
  656. }
  657. late_initcall(sched_init_debug);
  658. #endif
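/*
 * Editor's note -- illustrative usage, assuming debugfs is mounted at
 * /sys/kernel/debug: the file created by sched_init_debug() above lets the
 * feature bits be inspected and toggled at run time, e.g.
 *
 *	# cat /sys/kernel/debug/sched_features
 *	# echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * sched_feat_write() strips the optional "NO_" prefix and clears or sets the
 * matching bit in sysctl_sched_features.
 */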
  659. #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
  660. /*
  661. * Number of tasks to iterate in a single balance run.
  662. * Limited because this is done with IRQs disabled.
  663. */
  664. const_debug unsigned int sysctl_sched_nr_migrate = 32;
  665. /*
  666. * period over which we average the RT time consumption, measured
  667. * in ms.
  668. *
  669. * default: 1s
  670. */
  671. const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
  672. /*
  673. * period over which we measure -rt task cpu usage in us.
  674. * default: 1s
  675. */
  676. unsigned int sysctl_sched_rt_period = 1000000;
  677. static __read_mostly int scheduler_running;
  678. /*
  679. * part of the period that we allow rt tasks to run in us.
  680. * default: 0.95s
  681. */
  682. int sysctl_sched_rt_runtime = 950000;
  683. static inline u64 global_rt_period(void)
  684. {
  685. return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
  686. }
  687. static inline u64 global_rt_runtime(void)
  688. {
  689. if (sysctl_sched_rt_runtime < 0)
  690. return RUNTIME_INF;
  691. return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
  692. }
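/*
 * Editor's note -- worked example of the defaults above: with
 * sysctl_sched_rt_period = 1000000us and sysctl_sched_rt_runtime = 950000us,
 * global_rt_period() returns 1e9 ns and global_rt_runtime() returns 0.95e9 ns,
 * i.e. realtime tasks may consume at most 95% of every one-second period so
 * that non-RT tasks are not starved.  Setting sysctl_sched_rt_runtime to -1
 * makes global_rt_runtime() return RUNTIME_INF and disables the throttling.
 */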
  693. #ifndef prepare_arch_switch
  694. # define prepare_arch_switch(next) do { } while (0)
  695. #endif
  696. #ifndef finish_arch_switch
  697. # define finish_arch_switch(prev) do { } while (0)
  698. #endif
  699. static inline int task_current(struct rq *rq, struct task_struct *p)
  700. {
  701. return rq->curr == p;
  702. }
  703. static inline int task_running(struct rq *rq, struct task_struct *p)
  704. {
  705. #ifdef CONFIG_SMP
  706. return p->on_cpu;
  707. #else
  708. return task_current(rq, p);
  709. #endif
  710. }
  711. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  712. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  713. {
  714. #ifdef CONFIG_SMP
  715. /*
  716. * We can optimise this out completely for !SMP, because the
  717. * SMP rebalancing from interrupt is the only thing that cares
  718. * here.
  719. */
  720. next->on_cpu = 1;
  721. #endif
  722. }
  723. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  724. {
  725. #ifdef CONFIG_SMP
  726. /*
  727. * After ->on_cpu is cleared, the task can be moved to a different CPU.
  728. * We must ensure this doesn't happen until the switch is completely
  729. * finished.
  730. */
  731. smp_wmb();
  732. prev->on_cpu = 0;
  733. #endif
  734. #ifdef CONFIG_DEBUG_SPINLOCK
  735. /* this is a valid case when another task releases the spinlock */
  736. rq->lock.owner = current;
  737. #endif
  738. /*
  739. * If we are tracking spinlock dependencies then we have to
  740. * fix up the runqueue lock - which gets 'carried over' from
  741. * prev into current:
  742. */
  743. spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
  744. raw_spin_unlock_irq(&rq->lock);
  745. }
  746. #else /* __ARCH_WANT_UNLOCKED_CTXSW */
  747. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  748. {
  749. #ifdef CONFIG_SMP
  750. /*
  751. * We can optimise this out completely for !SMP, because the
  752. * SMP rebalancing from interrupt is the only thing that cares
  753. * here.
  754. */
  755. next->on_cpu = 1;
  756. #endif
  757. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  758. raw_spin_unlock_irq(&rq->lock);
  759. #else
  760. raw_spin_unlock(&rq->lock);
  761. #endif
  762. }
  763. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  764. {
  765. #ifdef CONFIG_SMP
  766. /*
  767. * After ->on_cpu is cleared, the task can be moved to a different CPU.
  768. * We must ensure this doesn't happen until the switch is completely
  769. * finished.
  770. */
  771. smp_wmb();
  772. prev->on_cpu = 0;
  773. #endif
  774. #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  775. local_irq_enable();
  776. #endif
  777. }
  778. #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
  779. /*
  780. * __task_rq_lock - lock the rq @p resides on.
  781. */
  782. static inline struct rq *__task_rq_lock(struct task_struct *p)
  783. __acquires(rq->lock)
  784. {
  785. struct rq *rq;
  786. lockdep_assert_held(&p->pi_lock);
  787. for (;;) {
  788. rq = task_rq(p);
  789. raw_spin_lock(&rq->lock);
  790. if (likely(rq == task_rq(p)))
  791. return rq;
  792. raw_spin_unlock(&rq->lock);
  793. }
  794. }
  795. /*
  796. * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  797. */
  798. static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
  799. __acquires(p->pi_lock)
  800. __acquires(rq->lock)
  801. {
  802. struct rq *rq;
  803. for (;;) {
  804. raw_spin_lock_irqsave(&p->pi_lock, *flags);
  805. rq = task_rq(p);
  806. raw_spin_lock(&rq->lock);
  807. if (likely(rq == task_rq(p)))
  808. return rq;
  809. raw_spin_unlock(&rq->lock);
  810. raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
  811. }
  812. }
  813. static void __task_rq_unlock(struct rq *rq)
  814. __releases(rq->lock)
  815. {
  816. raw_spin_unlock(&rq->lock);
  817. }
  818. static inline void
  819. task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
  820. __releases(rq->lock)
  821. __releases(p->pi_lock)
  822. {
  823. raw_spin_unlock(&rq->lock);
  824. raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
  825. }
  826. /*
  827. * this_rq_lock - lock this runqueue and disable interrupts.
  828. */
  829. static struct rq *this_rq_lock(void)
  830. __acquires(rq->lock)
  831. {
  832. struct rq *rq;
  833. local_irq_disable();
  834. rq = this_rq();
  835. raw_spin_lock(&rq->lock);
  836. return rq;
  837. }
  838. #ifdef CONFIG_SCHED_HRTICK
  839. /*
  840. * Use HR-timers to deliver accurate preemption points.
  841. *
842. * It's all a bit involved since we cannot program an hrtimer while holding the
843. * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
  844. * reschedule event.
  845. *
  846. * When we get rescheduled we reprogram the hrtick_timer outside of the
  847. * rq->lock.
  848. */
  849. /*
  850. * Use hrtick when:
  851. * - enabled by features
  852. * - hrtimer is actually high res
  853. */
  854. static inline int hrtick_enabled(struct rq *rq)
  855. {
  856. if (!sched_feat(HRTICK))
  857. return 0;
  858. if (!cpu_active(cpu_of(rq)))
  859. return 0;
  860. return hrtimer_is_hres_active(&rq->hrtick_timer);
  861. }
  862. static void hrtick_clear(struct rq *rq)
  863. {
  864. if (hrtimer_active(&rq->hrtick_timer))
  865. hrtimer_cancel(&rq->hrtick_timer);
  866. }
  867. /*
  868. * High-resolution timer tick.
  869. * Runs from hardirq context with interrupts disabled.
  870. */
  871. static enum hrtimer_restart hrtick(struct hrtimer *timer)
  872. {
  873. struct rq *rq = container_of(timer, struct rq, hrtick_timer);
  874. WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
  875. raw_spin_lock(&rq->lock);
  876. update_rq_clock(rq);
  877. rq->curr->sched_class->task_tick(rq, rq->curr, 1);
  878. raw_spin_unlock(&rq->lock);
  879. return HRTIMER_NORESTART;
  880. }
  881. #ifdef CONFIG_SMP
  882. /*
  883. * called from hardirq (IPI) context
  884. */
  885. static void __hrtick_start(void *arg)
  886. {
  887. struct rq *rq = arg;
  888. raw_spin_lock(&rq->lock);
  889. hrtimer_restart(&rq->hrtick_timer);
  890. rq->hrtick_csd_pending = 0;
  891. raw_spin_unlock(&rq->lock);
  892. }
  893. /*
  894. * Called to set the hrtick timer state.
  895. *
  896. * called with rq->lock held and irqs disabled
  897. */
  898. static void hrtick_start(struct rq *rq, u64 delay)
  899. {
  900. struct hrtimer *timer = &rq->hrtick_timer;
  901. ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
  902. hrtimer_set_expires(timer, time);
  903. if (rq == this_rq()) {
  904. hrtimer_restart(timer);
  905. } else if (!rq->hrtick_csd_pending) {
  906. __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
  907. rq->hrtick_csd_pending = 1;
  908. }
  909. }
  910. static int
  911. hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
  912. {
  913. int cpu = (int)(long)hcpu;
  914. switch (action) {
  915. case CPU_UP_CANCELED:
  916. case CPU_UP_CANCELED_FROZEN:
  917. case CPU_DOWN_PREPARE:
  918. case CPU_DOWN_PREPARE_FROZEN:
  919. case CPU_DEAD:
  920. case CPU_DEAD_FROZEN:
  921. hrtick_clear(cpu_rq(cpu));
  922. return NOTIFY_OK;
  923. }
  924. return NOTIFY_DONE;
  925. }
  926. static __init void init_hrtick(void)
  927. {
  928. hotcpu_notifier(hotplug_hrtick, 0);
  929. }
  930. #else
  931. /*
  932. * Called to set the hrtick timer state.
  933. *
  934. * called with rq->lock held and irqs disabled
  935. */
  936. static void hrtick_start(struct rq *rq, u64 delay)
  937. {
  938. __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
  939. HRTIMER_MODE_REL_PINNED, 0);
  940. }
  941. static inline void init_hrtick(void)
  942. {
  943. }
  944. #endif /* CONFIG_SMP */
  945. static void init_rq_hrtick(struct rq *rq)
  946. {
  947. #ifdef CONFIG_SMP
  948. rq->hrtick_csd_pending = 0;
  949. rq->hrtick_csd.flags = 0;
  950. rq->hrtick_csd.func = __hrtick_start;
  951. rq->hrtick_csd.info = rq;
  952. #endif
  953. hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  954. rq->hrtick_timer.function = hrtick;
  955. }
  956. #else /* CONFIG_SCHED_HRTICK */
  957. static inline void hrtick_clear(struct rq *rq)
  958. {
  959. }
  960. static inline void init_rq_hrtick(struct rq *rq)
  961. {
  962. }
  963. static inline void init_hrtick(void)
  964. {
  965. }
  966. #endif /* CONFIG_SCHED_HRTICK */
  967. /*
  968. * resched_task - mark a task 'to be rescheduled now'.
  969. *
  970. * On UP this means the setting of the need_resched flag, on SMP it
  971. * might also involve a cross-CPU call to trigger the scheduler on
  972. * the target CPU.
  973. */
  974. #ifdef CONFIG_SMP
  975. #ifndef tsk_is_polling
  976. #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
  977. #endif
  978. static void resched_task(struct task_struct *p)
  979. {
  980. int cpu;
  981. assert_raw_spin_locked(&task_rq(p)->lock);
  982. if (test_tsk_need_resched(p))
  983. return;
  984. set_tsk_need_resched(p);
  985. cpu = task_cpu(p);
  986. if (cpu == smp_processor_id())
  987. return;
  988. /* NEED_RESCHED must be visible before we test polling */
  989. smp_mb();
  990. if (!tsk_is_polling(p))
  991. smp_send_reschedule(cpu);
  992. }
  993. static void resched_cpu(int cpu)
  994. {
  995. struct rq *rq = cpu_rq(cpu);
  996. unsigned long flags;
  997. if (!raw_spin_trylock_irqsave(&rq->lock, flags))
  998. return;
  999. resched_task(cpu_curr(cpu));
  1000. raw_spin_unlock_irqrestore(&rq->lock, flags);
  1001. }
  1002. #ifdef CONFIG_NO_HZ
  1003. /*
  1004. * In the semi idle case, use the nearest busy cpu for migrating timers
  1005. * from an idle cpu. This is good for power-savings.
  1006. *
1007. * We don't do a similar optimization for a completely idle system, as
1008. * selecting an idle cpu will add more delays to the timers than intended
1009. * (as that cpu's timer base may not be up to date wrt jiffies etc).
  1010. */
  1011. int get_nohz_timer_target(void)
  1012. {
  1013. int cpu = smp_processor_id();
  1014. int i;
  1015. struct sched_domain *sd;
  1016. rcu_read_lock();
  1017. for_each_domain(cpu, sd) {
  1018. for_each_cpu(i, sched_domain_span(sd)) {
  1019. if (!idle_cpu(i)) {
  1020. cpu = i;
  1021. goto unlock;
  1022. }
  1023. }
  1024. }
  1025. unlock:
  1026. rcu_read_unlock();
  1027. return cpu;
  1028. }
  1029. /*
  1030. * When add_timer_on() enqueues a timer into the timer wheel of an
  1031. * idle CPU then this timer might expire before the next timer event
  1032. * which is scheduled to wake up that CPU. In case of a completely
  1033. * idle system the next event might even be infinite time into the
  1034. * future. wake_up_idle_cpu() ensures that the CPU is woken up and
  1035. * leaves the inner idle loop so the newly added timer is taken into
  1036. * account when the CPU goes back to idle and evaluates the timer
  1037. * wheel for the next timer event.
  1038. */
  1039. void wake_up_idle_cpu(int cpu)
  1040. {
  1041. struct rq *rq = cpu_rq(cpu);
  1042. if (cpu == smp_processor_id())
  1043. return;
  1044. /*
  1045. * This is safe, as this function is called with the timer
  1046. * wheel base lock of (cpu) held. When the CPU is on the way
  1047. * to idle and has not yet set rq->curr to idle then it will
  1048. * be serialized on the timer wheel base lock and take the new
  1049. * timer into account automatically.
  1050. */
  1051. if (rq->curr != rq->idle)
  1052. return;
  1053. /*
  1054. * We can set TIF_RESCHED on the idle task of the other CPU
  1055. * lockless. The worst case is that the other CPU runs the
  1056. * idle task through an additional NOOP schedule()
  1057. */
  1058. set_tsk_need_resched(rq->idle);
  1059. /* NEED_RESCHED must be visible before we test polling */
  1060. smp_mb();
  1061. if (!tsk_is_polling(rq->idle))
  1062. smp_send_reschedule(cpu);
  1063. }
  1064. #endif /* CONFIG_NO_HZ */
  1065. static u64 sched_avg_period(void)
  1066. {
  1067. return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
  1068. }
  1069. static void sched_avg_update(struct rq *rq)
  1070. {
  1071. s64 period = sched_avg_period();
  1072. while ((s64)(rq->clock - rq->age_stamp) > period) {
  1073. /*
  1074. * Inline assembly required to prevent the compiler
  1075. * optimising this loop into a divmod call.
  1076. * See __iter_div_u64_rem() for another example of this.
  1077. */
  1078. asm("" : "+rm" (rq->age_stamp));
  1079. rq->age_stamp += period;
  1080. rq->rt_avg /= 2;
  1081. }
  1082. }
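/*
 * Editor's note -- worked example: with the default sysctl_sched_time_avg of
 * MSEC_PER_SEC, sched_avg_period() is 0.5e9 ns.  Each time rq->clock runs past
 * rq->age_stamp by more than that half-second period, the loop above bumps
 * age_stamp by one period and halves rq->rt_avg, giving an exponential decay
 * of the accumulated RT time with a half-life of roughly half a second.
 */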
  1083. static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
  1084. {
  1085. rq->rt_avg += rt_delta;
  1086. sched_avg_update(rq);
  1087. }
  1088. #else /* !CONFIG_SMP */
  1089. static void resched_task(struct task_struct *p)
  1090. {
  1091. assert_raw_spin_locked(&task_rq(p)->lock);
  1092. set_tsk_need_resched(p);
  1093. }
  1094. static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
  1095. {
  1096. }
  1097. static void sched_avg_update(struct rq *rq)
  1098. {
  1099. }
  1100. #endif /* CONFIG_SMP */
  1101. #if BITS_PER_LONG == 32
  1102. # define WMULT_CONST (~0UL)
  1103. #else
  1104. # define WMULT_CONST (1UL << 32)
  1105. #endif
  1106. #define WMULT_SHIFT 32
  1107. /*
  1108. * Shift right and round:
  1109. */
  1110. #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
  1111. /*
  1112. * delta *= weight / lw
  1113. */
  1114. static unsigned long
  1115. calc_delta_mine(unsigned long delta_exec, unsigned long weight,
  1116. struct load_weight *lw)
  1117. {
  1118. u64 tmp;
  1119. /*
  1120. * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
  1121. * entities since MIN_SHARES = 2. Treat weight as 1 if less than
  1122. * 2^SCHED_LOAD_RESOLUTION.
  1123. */
  1124. if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
  1125. tmp = (u64)delta_exec * scale_load_down(weight);
  1126. else
  1127. tmp = (u64)delta_exec;
  1128. if (!lw->inv_weight) {
  1129. unsigned long w = scale_load_down(lw->weight);
  1130. if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
  1131. lw->inv_weight = 1;
  1132. else if (unlikely(!w))
  1133. lw->inv_weight = WMULT_CONST;
  1134. else
  1135. lw->inv_weight = WMULT_CONST / w;
  1136. }
  1137. /*
  1138. * Check whether we'd overflow the 64-bit multiplication:
  1139. */
  1140. if (unlikely(tmp > WMULT_CONST))
  1141. tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
  1142. WMULT_SHIFT/2);
  1143. else
  1144. tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
  1145. return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
  1146. }
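/*
 * Editor's note -- worked example of the scaling above: inv_weight caches
 * WMULT_CONST / lw->weight, roughly 2^32 / lw->weight, so that
 *
 *	delta_exec * weight / lw->weight
 *
 * can be computed as a multiply plus a rounded shift (SRR) instead of a
 * division.  For instance, a nice-0 entity (weight 1024) against a load of
 * weight 2048 turns a 1000000ns slice into approximately 500000ns.
 */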
  1147. static inline void update_load_add(struct load_weight *lw, unsigned long inc)
  1148. {
  1149. lw->weight += inc;
  1150. lw->inv_weight = 0;
  1151. }
  1152. static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  1153. {
  1154. lw->weight -= dec;
  1155. lw->inv_weight = 0;
  1156. }
  1157. static inline void update_load_set(struct load_weight *lw, unsigned long w)
  1158. {
  1159. lw->weight = w;
  1160. lw->inv_weight = 0;
  1161. }
  1162. /*
  1163. * To aid in avoiding the subversion of "niceness" due to uneven distribution
  1164. * of tasks with abnormal "nice" values across CPUs the contribution that
  1165. * each task makes to its run queue's load is weighted according to its
  1166. * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
  1167. * scaled version of the new time slice allocation that they receive on time
  1168. * slice expiry etc.
  1169. */
  1170. #define WEIGHT_IDLEPRIO 3
  1171. #define WMULT_IDLEPRIO 1431655765
  1172. /*
  1173. * Nice levels are multiplicative, with a gentle 10% change for every
  1174. * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
  1175. * nice 1, it will get ~10% less CPU time than another CPU-bound task
  1176. * that remained on nice 0.
  1177. *
  1178. * The "10% effect" is relative and cumulative: from _any_ nice level,
  1179. * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
  1180. * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
  1181. * If a task goes up by ~10% and another task goes down by ~10% then
  1182. * the relative distance between them is ~25%.)
  1183. */
  1184. static const int prio_to_weight[40] = {
  1185. /* -20 */ 88761, 71755, 56483, 46273, 36291,
  1186. /* -15 */ 29154, 23254, 18705, 14949, 11916,
  1187. /* -10 */ 9548, 7620, 6100, 4904, 3906,
  1188. /* -5 */ 3121, 2501, 1991, 1586, 1277,
  1189. /* 0 */ 1024, 820, 655, 526, 423,
  1190. /* 5 */ 335, 272, 215, 172, 137,
  1191. /* 10 */ 110, 87, 70, 56, 45,
  1192. /* 15 */ 36, 29, 23, 18, 15,
  1193. };
  1194. /*
  1195. * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
  1196. *
  1197. * In cases where the weight does not change often, we can use the
  1198. * precalculated inverse to speed up arithmetics by turning divisions
  1199. * into multiplications:
  1200. */
  1201. static const u32 prio_to_wmult[40] = {
  1202. /* -20 */ 48388, 59856, 76040, 92818, 118348,
  1203. /* -15 */ 147320, 184698, 229616, 287308, 360437,
  1204. /* -10 */ 449829, 563644, 704093, 875809, 1099582,
  1205. /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
  1206. /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
  1207. /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
  1208. /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
  1209. /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
  1210. };
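/*
 * Editor's note -- worked example of the "10% effect" described above: a
 * nice-0 task has weight prio_to_weight[20] = 1024 and a nice-1 task has
 * weight prio_to_weight[21] = 820.  Running together on one CPU they receive
 * 1024/1844 ~= 55% and 820/1844 ~= 45% of the CPU respectively: about ten
 * percentage points apart, with a weight ratio of 1024/820 ~= 1.25.
 */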
  1211. /* Time spent by the tasks of the cpu accounting group executing in ... */
  1212. enum cpuacct_stat_index {
  1213. CPUACCT_STAT_USER, /* ... user mode */
  1214. CPUACCT_STAT_SYSTEM, /* ... kernel mode */
  1215. CPUACCT_STAT_NSTATS,
  1216. };
  1217. #ifdef CONFIG_CGROUP_CPUACCT
  1218. static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
  1219. static void cpuacct_update_stats(struct task_struct *tsk,
  1220. enum cpuacct_stat_index idx, cputime_t val);
  1221. #else
  1222. static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
  1223. static inline void cpuacct_update_stats(struct task_struct *tsk,
  1224. enum cpuacct_stat_index idx, cputime_t val) {}
  1225. #endif
  1226. static inline void inc_cpu_load(struct rq *rq, unsigned long load)
  1227. {
  1228. update_load_add(&rq->load, load);
  1229. }
  1230. static inline void dec_cpu_load(struct rq *rq, unsigned long load)
  1231. {
  1232. update_load_sub(&rq->load, load);
  1233. }
  1234. #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
  1235. typedef int (*tg_visitor)(struct task_group *, void *);
  1236. /*
  1237. * Iterate the full tree, calling @down when first entering a node and @up when
  1238. * leaving it for the final time.
  1239. */
  1240. static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
  1241. {
  1242. struct task_group *parent, *child;
  1243. int ret;
  1244. rcu_read_lock();
  1245. parent = &root_task_group;
  1246. down:
  1247. ret = (*down)(parent, data);
  1248. if (ret)
  1249. goto out_unlock;
  1250. list_for_each_entry_rcu(child, &parent->children, siblings) {
  1251. parent = child;
  1252. goto down;
  1253. up:
  1254. continue;
  1255. }
  1256. ret = (*up)(parent, data);
  1257. if (ret)
  1258. goto out_unlock;
  1259. child = parent;
  1260. parent = parent->parent;
  1261. if (parent)
  1262. goto up;
  1263. out_unlock:
  1264. rcu_read_unlock();
  1265. return ret;
  1266. }
  1267. static int tg_nop(struct task_group *tg, void *data)
  1268. {
  1269. return 0;
  1270. }
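/*
 * Editor's note -- illustrative sketch with a hypothetical visitor: a caller
 * provides a @down and an @up callback, either of which may be tg_nop().
 * A pre-order dump of the group hierarchy could, for example, be written as
 *
 *	static int dump_tg(struct task_group *tg, void *data)
 *	{
 *		printk(KERN_DEBUG "task_group %p\n", tg);
 *		return 0;
 *	}
 *
 *	walk_tg_tree(dump_tg, tg_nop, NULL);
 *
 * A non-zero return value from either visitor aborts the walk and is
 * propagated back to the caller of walk_tg_tree().
 */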
  1271. #endif
  1272. #ifdef CONFIG_SMP
  1273. /* Used instead of source_load when we know the type == 0 */
  1274. static unsigned long weighted_cpuload(const int cpu)
  1275. {
  1276. return cpu_rq(cpu)->load.weight;
  1277. }
  1278. /*
  1279. * Return a low guess at the load of a migration-source cpu weighted
  1280. * according to the scheduling class and "nice" value.
  1281. *
  1282. * We want to under-estimate the load of migration sources, to
  1283. * balance conservatively.
  1284. */
  1285. static unsigned long source_load(int cpu, int type)
  1286. {
  1287. struct rq *rq = cpu_rq(cpu);
  1288. unsigned long total = weighted_cpuload(cpu);
  1289. if (type == 0 || !sched_feat(LB_BIAS))
  1290. return total;
  1291. return min(rq->cpu_load[type-1], total);
  1292. }
  1293. /*
  1294. * Return a high guess at the load of a migration-target cpu weighted
  1295. * according to the scheduling class and "nice" value.
  1296. */
  1297. static unsigned long target_load(int cpu, int type)
  1298. {
  1299. struct rq *rq = cpu_rq(cpu);
  1300. unsigned long total = weighted_cpuload(cpu);
  1301. if (type == 0 || !sched_feat(LB_BIAS))
  1302. return total;
  1303. return max(rq->cpu_load[type-1], total);
  1304. }
  1305. static unsigned long power_of(int cpu)
  1306. {
  1307. return cpu_rq(cpu)->cpu_power;
  1308. }
  1309. static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
  1310. static unsigned long cpu_avg_load_per_task(int cpu)
  1311. {
  1312. struct rq *rq = cpu_rq(cpu);
  1313. unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
  1314. if (nr_running)
  1315. return rq->load.weight / nr_running;
  1316. return 0;
  1317. }
  1318. #ifdef CONFIG_PREEMPT
  1319. static void double_rq_lock(struct rq *rq1, struct rq *rq2);
  1320. /*
  1321. * fair double_lock_balance: Safely acquires both rq->locks in a fair
  1322. * way at the expense of forcing extra atomic operations in all
  1323. * invocations. This assures that the double_lock is acquired using the
  1324. * same underlying policy as the spinlock_t on this architecture, which
  1325. * reduces latency compared to the unfair variant below. However, it
  1326. * also adds more overhead and therefore may reduce throughput.
  1327. */
  1328. static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1329. __releases(this_rq->lock)
  1330. __acquires(busiest->lock)
  1331. __acquires(this_rq->lock)
  1332. {
  1333. raw_spin_unlock(&this_rq->lock);
  1334. double_rq_lock(this_rq, busiest);
  1335. return 1;
  1336. }
  1337. #else
  1338. /*
  1339. * Unfair double_lock_balance: Optimizes throughput at the expense of
  1340. * latency by eliminating extra atomic operations when the locks are
  1341. * already in proper order on entry. This favors lower cpu-ids and will
  1342. * grant the double lock to lower cpus over higher ids under contention,
  1343. * regardless of entry order into the function.
  1344. */
  1345. static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1346. __releases(this_rq->lock)
  1347. __acquires(busiest->lock)
  1348. __acquires(this_rq->lock)
  1349. {
  1350. int ret = 0;
  1351. if (unlikely(!raw_spin_trylock(&busiest->lock))) {
  1352. if (busiest < this_rq) {
  1353. raw_spin_unlock(&this_rq->lock);
  1354. raw_spin_lock(&busiest->lock);
  1355. raw_spin_lock_nested(&this_rq->lock,
  1356. SINGLE_DEPTH_NESTING);
  1357. ret = 1;
  1358. } else
  1359. raw_spin_lock_nested(&busiest->lock,
  1360. SINGLE_DEPTH_NESTING);
  1361. }
  1362. return ret;
  1363. }
  1364. #endif /* CONFIG_PREEMPT */
  1365. /*
  1366. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  1367. */
  1368. static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1369. {
  1370. if (unlikely(!irqs_disabled())) {
1371. /* printk() doesn't work well under rq->lock */
  1372. raw_spin_unlock(&this_rq->lock);
  1373. BUG_ON(1);
  1374. }
  1375. return _double_lock_balance(this_rq, busiest);
  1376. }
  1377. static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
  1378. __releases(busiest->lock)
  1379. {
  1380. raw_spin_unlock(&busiest->lock);
  1381. lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
  1382. }
  1383. /*
  1384. * double_rq_lock - safely lock two runqueues
  1385. *
  1386. * Note this does not disable interrupts like task_rq_lock,
  1387. * you need to do so manually before calling.
  1388. */
  1389. static void double_rq_lock(struct rq *rq1, struct rq *rq2)
  1390. __acquires(rq1->lock)
  1391. __acquires(rq2->lock)
  1392. {
  1393. BUG_ON(!irqs_disabled());
  1394. if (rq1 == rq2) {
  1395. raw_spin_lock(&rq1->lock);
  1396. __acquire(rq2->lock); /* Fake it out ;) */
  1397. } else {
  1398. if (rq1 < rq2) {
  1399. raw_spin_lock(&rq1->lock);
  1400. raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
  1401. } else {
  1402. raw_spin_lock(&rq2->lock);
  1403. raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
  1404. }
  1405. }
  1406. }
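/*
 * Editor's note -- why the address ordering above avoids deadlock: if two
 * CPUs call double_rq_lock() concurrently with the arguments swapped, both
 * still take the lower-addressed runqueue lock first and the higher-addressed
 * one second, so neither can hold one lock while waiting forever for the
 * other.  Both variants of _double_lock_balance() above end up honouring the
 * same ordering whenever they have to drop and retake this_rq->lock.
 */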
  1407. /*
  1408. * double_rq_unlock - safely unlock two runqueues
  1409. *
  1410. * Note this does not restore interrupts like task_rq_unlock,
  1411. * you need to do so manually after calling.
  1412. */
  1413. static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
  1414. __releases(rq1->lock)
  1415. __releases(rq2->lock)
  1416. {
  1417. raw_spin_unlock(&rq1->lock);
  1418. if (rq1 != rq2)
  1419. raw_spin_unlock(&rq2->lock);
  1420. else
  1421. __release(rq2->lock);
  1422. }
  1423. #else /* CONFIG_SMP */
  1424. /*
  1425. * double_rq_lock - safely lock two runqueues
  1426. *
  1427. * Note this does not disable interrupts like task_rq_lock,
  1428. * you need to do so manually before calling.
  1429. */
  1430. static void double_rq_lock(struct rq *rq1, struct rq *rq2)
  1431. __acquires(rq1->lock)
  1432. __acquires(rq2->lock)
  1433. {
  1434. BUG_ON(!irqs_disabled());
  1435. BUG_ON(rq1 != rq2);
  1436. raw_spin_lock(&rq1->lock);
  1437. __acquire(rq2->lock); /* Fake it out ;) */
  1438. }
  1439. /*
  1440. * double_rq_unlock - safely unlock two runqueues
  1441. *
  1442. * Note this does not restore interrupts like task_rq_unlock,
  1443. * you need to do so manually after calling.
  1444. */
  1445. static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
  1446. __releases(rq1->lock)
  1447. __releases(rq2->lock)
  1448. {
  1449. BUG_ON(rq1 != rq2);
  1450. raw_spin_unlock(&rq1->lock);
  1451. __release(rq2->lock);
  1452. }
  1453. #endif
  1454. static void calc_load_account_idle(struct rq *this_rq);
  1455. static void update_sysctl(void);
  1456. static int get_update_sysctl_factor(void);
  1457. static void update_cpu_load(struct rq *this_rq);
  1458. static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  1459. {
  1460. set_task_rq(p, cpu);
  1461. #ifdef CONFIG_SMP
  1462. /*
  1463. * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1464. * successfully executed on another CPU. We must ensure that updates of
  1465. * per-task data have been completed by this moment.
  1466. */
  1467. smp_wmb();
  1468. task_thread_info(p)->cpu = cpu;
  1469. #endif
  1470. }
  1471. static const struct sched_class rt_sched_class;
  1472. #define sched_class_highest (&stop_sched_class)
  1473. #define for_each_class(class) \
  1474. for (class = sched_class_highest; class; class = class->next)
  1475. #include "sched_stats.h"
  1476. static void inc_nr_running(struct rq *rq)
  1477. {
  1478. rq->nr_running++;
  1479. }
  1480. static void dec_nr_running(struct rq *rq)
  1481. {
  1482. rq->nr_running--;
  1483. }
  1484. static void set_load_weight(struct task_struct *p)
  1485. {
  1486. int prio = p->static_prio - MAX_RT_PRIO;
  1487. struct load_weight *load = &p->se.load;
  1488. /*
  1489. * SCHED_IDLE tasks get minimal weight:
  1490. */
  1491. if (p->policy == SCHED_IDLE) {
  1492. load->weight = scale_load(WEIGHT_IDLEPRIO);
  1493. load->inv_weight = WMULT_IDLEPRIO;
  1494. return;
  1495. }
  1496. load->weight = scale_load(prio_to_weight[prio]);
  1497. load->inv_weight = prio_to_wmult[prio];
  1498. }
  1499. static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
  1500. {
  1501. update_rq_clock(rq);
  1502. sched_info_queued(p);
  1503. p->sched_class->enqueue_task(rq, p, flags);
  1504. }
  1505. static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
  1506. {
  1507. update_rq_clock(rq);
  1508. sched_info_dequeued(p);
  1509. p->sched_class->dequeue_task(rq, p, flags);
  1510. }
  1511. /*
  1512. * activate_task - move a task to the runqueue.
  1513. */
  1514. static void activate_task(struct rq *rq, struct task_struct *p, int flags)
  1515. {
  1516. if (task_contributes_to_load(p))
  1517. rq->nr_uninterruptible--;
  1518. enqueue_task(rq, p, flags);
  1519. inc_nr_running(rq);
  1520. }
  1521. /*
  1522. * deactivate_task - remove a task from the runqueue.
  1523. */
  1524. static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
  1525. {
  1526. if (task_contributes_to_load(p))
  1527. rq->nr_uninterruptible++;
  1528. dequeue_task(rq, p, flags);
  1529. dec_nr_running(rq);
  1530. }
  1531. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  1532. /*
  1533. * There are no locks covering percpu hardirq/softirq time.
  1534. * They are only modified in account_system_vtime, on corresponding CPU
  1535. * with interrupts disabled. So, writes are safe.
  1536. * They are read and saved off onto struct rq in update_rq_clock().
1537. * This may result in another CPU reading this CPU's irq time and can
1538. * race with irq/account_system_vtime on this CPU. We would either get the old
1539. * or the new value, with a side effect of accounting a slice of irq time to the
1540. * wrong task when an irq is in progress while we read rq->clock. That is a worthy
  1541. * compromise in place of having locks on each irq in account_system_time.
  1542. */
  1543. static DEFINE_PER_CPU(u64, cpu_hardirq_time);
  1544. static DEFINE_PER_CPU(u64, cpu_softirq_time);
  1545. static DEFINE_PER_CPU(u64, irq_start_time);
  1546. static int sched_clock_irqtime;
  1547. void enable_sched_clock_irqtime(void)
  1548. {
  1549. sched_clock_irqtime = 1;
  1550. }
  1551. void disable_sched_clock_irqtime(void)
  1552. {
  1553. sched_clock_irqtime = 0;
  1554. }
  1555. #ifndef CONFIG_64BIT
  1556. static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
  1557. static inline void irq_time_write_begin(void)
  1558. {
  1559. __this_cpu_inc(irq_time_seq.sequence);
  1560. smp_wmb();
  1561. }
  1562. static inline void irq_time_write_end(void)
  1563. {
  1564. smp_wmb();
  1565. __this_cpu_inc(irq_time_seq.sequence);
  1566. }
  1567. static inline u64 irq_time_read(int cpu)
  1568. {
  1569. u64 irq_time;
  1570. unsigned seq;
  1571. do {
  1572. seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
  1573. irq_time = per_cpu(cpu_softirq_time, cpu) +
  1574. per_cpu(cpu_hardirq_time, cpu);
  1575. } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
  1576. return irq_time;
  1577. }
  1578. #else /* CONFIG_64BIT */
  1579. static inline void irq_time_write_begin(void)
  1580. {
  1581. }
  1582. static inline void irq_time_write_end(void)
  1583. {
  1584. }
  1585. static inline u64 irq_time_read(int cpu)
  1586. {
  1587. return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
  1588. }
  1589. #endif /* CONFIG_64BIT */
  1590. /*
  1591. * Called before incrementing preempt_count on {soft,}irq_enter
  1592. * and before decrementing preempt_count on {soft,}irq_exit.
  1593. */
  1594. void account_system_vtime(struct task_struct *curr)
  1595. {
  1596. unsigned long flags;
  1597. s64 delta;
  1598. int cpu;
  1599. if (!sched_clock_irqtime)
  1600. return;
  1601. local_irq_save(flags);
  1602. cpu = smp_processor_id();
  1603. delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
  1604. __this_cpu_add(irq_start_time, delta);
  1605. irq_time_write_begin();
  1606. /*
  1607. * We do not account for softirq time from ksoftirqd here.
1608. * We want to continue accounting softirq time to the ksoftirqd thread
1609. * in that case, so as not to confuse the scheduler with a special task
1610. * that does not consume any time, but still wants to run.
  1611. */
  1612. if (hardirq_count())
  1613. __this_cpu_add(cpu_hardirq_time, delta);
  1614. else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
  1615. __this_cpu_add(cpu_softirq_time, delta);
  1616. irq_time_write_end();
  1617. local_irq_restore(flags);
  1618. }
  1619. EXPORT_SYMBOL_GPL(account_system_vtime);
  1620. #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
  1621. #ifdef CONFIG_PARAVIRT
  1622. static inline u64 steal_ticks(u64 steal)
  1623. {
  1624. if (unlikely(steal > NSEC_PER_SEC))
  1625. return div_u64(steal, TICK_NSEC);
  1626. return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
  1627. }
  1628. #endif
  1629. static void update_rq_clock_task(struct rq *rq, s64 delta)
  1630. {
  1631. /*
1632. * In theory, the compiler should just see 0 here, and optimize out the call
  1633. * to sched_rt_avg_update. But I don't trust it...
  1634. */
  1635. #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
  1636. s64 steal = 0, irq_delta = 0;
  1637. #endif
  1638. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  1639. irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
  1640. /*
  1641. * Since irq_time is only updated on {soft,}irq_exit, we might run into
  1642. * this case when a previous update_rq_clock() happened inside a
  1643. * {soft,}irq region.
  1644. *
  1645. * When this happens, we stop ->clock_task and only update the
  1646. * prev_irq_time stamp to account for the part that fit, so that a next
  1647. * update will consume the rest. This ensures ->clock_task is
  1648. * monotonic.
  1649. *
1650. * It does however cause some slight misattribution of {soft,}irq
  1651. * time, a more accurate solution would be to update the irq_time using
  1652. * the current rq->clock timestamp, except that would require using
  1653. * atomic ops.
  1654. */
  1655. if (irq_delta > delta)
  1656. irq_delta = delta;
  1657. rq->prev_irq_time += irq_delta;
  1658. delta -= irq_delta;
  1659. #endif
  1660. #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
  1661. if (static_branch((&paravirt_steal_rq_enabled))) {
  1662. u64 st;
  1663. steal = paravirt_steal_clock(cpu_of(rq));
  1664. steal -= rq->prev_steal_time_rq;
  1665. if (unlikely(steal > delta))
  1666. steal = delta;
  1667. st = steal_ticks(steal);
  1668. steal = st * TICK_NSEC;
  1669. rq->prev_steal_time_rq += steal;
  1670. delta -= steal;
  1671. }
  1672. #endif
  1673. rq->clock_task += delta;
  1674. #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
  1675. if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
  1676. sched_rt_avg_update(rq, irq_delta + steal);
  1677. #endif
  1678. }
  1679. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  1680. static int irqtime_account_hi_update(void)
  1681. {
  1682. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  1683. unsigned long flags;
  1684. u64 latest_ns;
  1685. int ret = 0;
  1686. local_irq_save(flags);
  1687. latest_ns = this_cpu_read(cpu_hardirq_time);
  1688. if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
  1689. ret = 1;
  1690. local_irq_restore(flags);
  1691. return ret;
  1692. }
  1693. static int irqtime_account_si_update(void)
  1694. {
  1695. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  1696. unsigned long flags;
  1697. u64 latest_ns;
  1698. int ret = 0;
  1699. local_irq_save(flags);
  1700. latest_ns = this_cpu_read(cpu_softirq_time);
  1701. if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
  1702. ret = 1;
  1703. local_irq_restore(flags);
  1704. return ret;
  1705. }
  1706. #else /* CONFIG_IRQ_TIME_ACCOUNTING */
  1707. #define sched_clock_irqtime (0)
  1708. #endif
  1709. #include "sched_idletask.c"
  1710. #include "sched_fair.c"
  1711. #include "sched_rt.c"
  1712. #include "sched_autogroup.c"
  1713. #include "sched_stoptask.c"
  1714. #ifdef CONFIG_SCHED_DEBUG
  1715. # include "sched_debug.c"
  1716. #endif
  1717. void sched_set_stop_task(int cpu, struct task_struct *stop)
  1718. {
  1719. struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
  1720. struct task_struct *old_stop = cpu_rq(cpu)->stop;
  1721. if (stop) {
  1722. /*
1723. * Make it appear like a SCHED_FIFO task; it's something
1724. * userspace knows about and won't get confused by.
  1725. *
  1726. * Also, it will make PI more or less work without too
  1727. * much confusion -- but then, stop work should not
  1728. * rely on PI working anyway.
  1729. */
  1730. sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
  1731. stop->sched_class = &stop_sched_class;
  1732. }
  1733. cpu_rq(cpu)->stop = stop;
  1734. if (old_stop) {
  1735. /*
  1736. * Reset it back to a normal scheduling class so that
  1737. * it can die in pieces.
  1738. */
  1739. old_stop->sched_class = &rt_sched_class;
  1740. }
  1741. }
  1742. /*
  1743. * __normal_prio - return the priority that is based on the static prio
  1744. */
  1745. static inline int __normal_prio(struct task_struct *p)
  1746. {
  1747. return p->static_prio;
  1748. }
  1749. /*
  1750. * Calculate the expected normal priority: i.e. priority
  1751. * without taking RT-inheritance into account. Might be
  1752. * boosted by interactivity modifiers. Changes upon fork,
  1753. * setprio syscalls, and whenever the interactivity
  1754. * estimator recalculates.
  1755. */
  1756. static inline int normal_prio(struct task_struct *p)
  1757. {
  1758. int prio;
  1759. if (task_has_rt_policy(p))
  1760. prio = MAX_RT_PRIO-1 - p->rt_priority;
  1761. else
  1762. prio = __normal_prio(p);
  1763. return prio;
  1764. }
  1765. /*
  1766. * Calculate the current priority, i.e. the priority
  1767. * taken into account by the scheduler. This value might
  1768. * be boosted by RT tasks, or might be boosted by
  1769. * interactivity modifiers. Will be RT if the task got
  1770. * RT-boosted. If not then it returns p->normal_prio.
  1771. */
  1772. static int effective_prio(struct task_struct *p)
  1773. {
  1774. p->normal_prio = normal_prio(p);
  1775. /*
  1776. * If we are RT tasks or we were boosted to RT priority,
  1777. * keep the priority unchanged. Otherwise, update priority
  1778. * to the normal priority:
  1779. */
  1780. if (!rt_prio(p->prio))
  1781. return p->normal_prio;
  1782. return p->prio;
  1783. }
  1784. /**
  1785. * task_curr - is this task currently executing on a CPU?
  1786. * @p: the task in question.
  1787. */
  1788. inline int task_curr(const struct task_struct *p)
  1789. {
  1790. return cpu_curr(task_cpu(p)) == p;
  1791. }
  1792. static inline void check_class_changed(struct rq *rq, struct task_struct *p,
  1793. const struct sched_class *prev_class,
  1794. int oldprio)
  1795. {
  1796. if (prev_class != p->sched_class) {
  1797. if (prev_class->switched_from)
  1798. prev_class->switched_from(rq, p);
  1799. p->sched_class->switched_to(rq, p);
  1800. } else if (oldprio != p->prio)
  1801. p->sched_class->prio_changed(rq, p, oldprio);
  1802. }
  1803. static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
  1804. {
  1805. const struct sched_class *class;
  1806. if (p->sched_class == rq->curr->sched_class) {
  1807. rq->curr->sched_class->check_preempt_curr(rq, p, flags);
  1808. } else {
  1809. for_each_class(class) {
  1810. if (class == rq->curr->sched_class)
  1811. break;
  1812. if (class == p->sched_class) {
  1813. resched_task(rq->curr);
  1814. break;
  1815. }
  1816. }
  1817. }
  1818. /*
  1819. * A queue event has occurred, and we're going to schedule. In
  1820. * this case, we can save a useless back to back clock update.
  1821. */
  1822. if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
  1823. rq->skip_clock_update = 1;
  1824. }
  1825. #ifdef CONFIG_SMP
  1826. /*
  1827. * Is this task likely cache-hot:
  1828. */
  1829. static int
  1830. task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
  1831. {
  1832. s64 delta;
  1833. if (p->sched_class != &fair_sched_class)
  1834. return 0;
  1835. if (unlikely(p->policy == SCHED_IDLE))
  1836. return 0;
  1837. /*
  1838. * Buddy candidates are cache hot:
  1839. */
  1840. if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
  1841. (&p->se == cfs_rq_of(&p->se)->next ||
  1842. &p->se == cfs_rq_of(&p->se)->last))
  1843. return 1;
  1844. if (sysctl_sched_migration_cost == -1)
  1845. return 1;
  1846. if (sysctl_sched_migration_cost == 0)
  1847. return 0;
  1848. delta = now - p->se.exec_start;
  1849. return delta < (s64)sysctl_sched_migration_cost;
  1850. }
  1851. void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  1852. {
  1853. #ifdef CONFIG_SCHED_DEBUG
  1854. /*
  1855. * We should never call set_task_cpu() on a blocked task,
  1856. * ttwu() will sort out the placement.
  1857. */
  1858. WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
  1859. !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
  1860. #ifdef CONFIG_LOCKDEP
  1861. /*
1862. * The caller should hold either p->pi_lock or rq->lock when changing
  1863. * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
  1864. *
  1865. * sched_move_task() holds both and thus holding either pins the cgroup,
  1866. * see set_task_rq().
  1867. *
  1868. * Furthermore, all task_rq users should acquire both locks, see
  1869. * task_rq_lock().
  1870. */
  1871. WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
  1872. lockdep_is_held(&task_rq(p)->lock)));
  1873. #endif
  1874. #endif
  1875. trace_sched_migrate_task(p, new_cpu);
  1876. if (task_cpu(p) != new_cpu) {
  1877. p->se.nr_migrations++;
  1878. perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
  1879. }
  1880. __set_task_cpu(p, new_cpu);
  1881. }
  1882. struct migration_arg {
  1883. struct task_struct *task;
  1884. int dest_cpu;
  1885. };
  1886. static int migration_cpu_stop(void *data);
  1887. /*
  1888. * wait_task_inactive - wait for a thread to unschedule.
  1889. *
  1890. * If @match_state is nonzero, it's the @p->state value just checked and
  1891. * not expected to change. If it changes, i.e. @p might have woken up,
  1892. * then return zero. When we succeed in waiting for @p to be off its CPU,
  1893. * we return a positive number (its total switch count). If a second call
  1894. * a short while later returns the same number, the caller can be sure that
  1895. * @p has remained unscheduled the whole time.
  1896. *
  1897. * The caller must ensure that the task *will* unschedule sometime soon,
  1898. * else this function might spin for a *long* time. This function can't
  1899. * be called with interrupts off, or it may introduce deadlock with
  1900. * smp_call_function() if an IPI is sent by the same process we are
  1901. * waiting to become inactive.
  1902. */
  1903. unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  1904. {
  1905. unsigned long flags;
  1906. int running, on_rq;
  1907. unsigned long ncsw;
  1908. struct rq *rq;
  1909. for (;;) {
  1910. /*
  1911. * We do the initial early heuristics without holding
  1912. * any task-queue locks at all. We'll only try to get
  1913. * the runqueue lock when things look like they will
  1914. * work out!
  1915. */
  1916. rq = task_rq(p);
  1917. /*
  1918. * If the task is actively running on another CPU
  1919. * still, just relax and busy-wait without holding
  1920. * any locks.
  1921. *
  1922. * NOTE! Since we don't hold any locks, it's not
  1923. * even sure that "rq" stays as the right runqueue!
  1924. * But we don't care, since "task_running()" will
  1925. * return false if the runqueue has changed and p
  1926. * is actually now running somewhere else!
  1927. */
  1928. while (task_running(rq, p)) {
  1929. if (match_state && unlikely(p->state != match_state))
  1930. return 0;
  1931. cpu_relax();
  1932. }
  1933. /*
  1934. * Ok, time to look more closely! We need the rq
  1935. * lock now, to be *sure*. If we're wrong, we'll
  1936. * just go back and repeat.
  1937. */
  1938. rq = task_rq_lock(p, &flags);
  1939. trace_sched_wait_task(p);
  1940. running = task_running(rq, p);
  1941. on_rq = p->on_rq;
  1942. ncsw = 0;
  1943. if (!match_state || p->state == match_state)
  1944. ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  1945. task_rq_unlock(rq, p, &flags);
  1946. /*
  1947. * If it changed from the expected state, bail out now.
  1948. */
  1949. if (unlikely(!ncsw))
  1950. break;
  1951. /*
  1952. * Was it really running after all now that we
  1953. * checked with the proper locks actually held?
  1954. *
  1955. * Oops. Go back and try again..
  1956. */
  1957. if (unlikely(running)) {
  1958. cpu_relax();
  1959. continue;
  1960. }
  1961. /*
  1962. * It's not enough that it's not actively running,
  1963. * it must be off the runqueue _entirely_, and not
  1964. * preempted!
  1965. *
  1966. * So if it was still runnable (but just not actively
  1967. * running right now), it's preempted, and we should
  1968. * yield - it could be a while.
  1969. */
  1970. if (unlikely(on_rq)) {
  1971. ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
  1972. set_current_state(TASK_UNINTERRUPTIBLE);
  1973. schedule_hrtimeout(&to, HRTIMER_MODE_REL);
  1974. continue;
  1975. }
  1976. /*
  1977. * Ahh, all good. It wasn't running, and it wasn't
  1978. * runnable, which means that it will never become
  1979. * running in the future either. We're all done!
  1980. */
  1981. break;
  1982. }
  1983. return ncsw;
  1984. }
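/*
 * Editor's note -- illustrative use of the switch-count cookie returned
 * above (hypothetical caller):
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	...
 *	if (ncsw && wait_task_inactive(p, TASK_TRACED) == ncsw)
 *		;	-- p was never scheduled in between the two calls
 *
 * A zero return means @p left @match_state before it got off the CPU, so the
 * caller must revalidate the task's state.
 */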
  1985. /***
  1986. * kick_process - kick a running thread to enter/exit the kernel
  1987. * @p: the to-be-kicked thread
  1988. *
  1989. * Cause a process which is running on another CPU to enter
  1990. * kernel-mode, without any delay. (to get signals handled.)
  1991. *
  1992. * NOTE: this function doesn't have to take the runqueue lock,
  1993. * because all it wants to ensure is that the remote task enters
  1994. * the kernel. If the IPI races and the task has been migrated
  1995. * to another CPU then no harm is done and the purpose has been
  1996. * achieved as well.
  1997. */
  1998. void kick_process(struct task_struct *p)
  1999. {
  2000. int cpu;
  2001. preempt_disable();
  2002. cpu = task_cpu(p);
  2003. if ((cpu != smp_processor_id()) && task_curr(p))
  2004. smp_send_reschedule(cpu);
  2005. preempt_enable();
  2006. }
  2007. EXPORT_SYMBOL_GPL(kick_process);
  2008. #endif /* CONFIG_SMP */
  2009. #ifdef CONFIG_SMP
  2010. /*
  2011. * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  2012. */
  2013. static int select_fallback_rq(int cpu, struct task_struct *p)
  2014. {
  2015. int dest_cpu;
  2016. const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
  2017. /* Look for allowed, online CPU in same node. */
  2018. for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
  2019. if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
  2020. return dest_cpu;
  2021. /* Any allowed, online CPU? */
  2022. dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
  2023. if (dest_cpu < nr_cpu_ids)
  2024. return dest_cpu;
  2025. /* No more Mr. Nice Guy. */
  2026. dest_cpu = cpuset_cpus_allowed_fallback(p);
  2027. /*
  2028. * Don't tell them about moving exiting tasks or
  2029. * kernel threads (both mm NULL), since they never
2030. * leave the kernel.
  2031. */
  2032. if (p->mm && printk_ratelimit()) {
  2033. printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
  2034. task_pid_nr(p), p->comm, cpu);
  2035. }
  2036. return dest_cpu;
  2037. }
  2038. /*
  2039. * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  2040. */
  2041. static inline
  2042. int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
  2043. {
  2044. int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
  2045. /*
  2046. * In order not to call set_task_cpu() on a blocking task we need
  2047. * to rely on ttwu() to place the task on a valid ->cpus_allowed
  2048. * cpu.
  2049. *
  2050. * Since this is common to all placement strategies, this lives here.
  2051. *
  2052. * [ this allows ->select_task() to simply return task_cpu(p) and
  2053. * not worry about this generic constraint ]
  2054. */
  2055. if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
  2056. !cpu_online(cpu)))
  2057. cpu = select_fallback_rq(task_cpu(p), p);
  2058. return cpu;
  2059. }
  2060. static void update_avg(u64 *avg, u64 sample)
  2061. {
  2062. s64 diff = sample - *avg;
  2063. *avg += diff >> 3;
  2064. }
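/*
 * Editor's note: update_avg() above is an exponential moving average with a
 * weight of 1/8, i.e. avg += (sample - avg) / 8, so each new sample nudges
 * the average one eighth of the way towards it; e.g. avg = 800, sample = 1600
 * gives a new avg of 900.  It is used below to track rq->avg_idle.
 */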
  2065. #endif
  2066. static void
  2067. ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
  2068. {
  2069. #ifdef CONFIG_SCHEDSTATS
  2070. struct rq *rq = this_rq();
  2071. #ifdef CONFIG_SMP
  2072. int this_cpu = smp_processor_id();
  2073. if (cpu == this_cpu) {
  2074. schedstat_inc(rq, ttwu_local);
  2075. schedstat_inc(p, se.statistics.nr_wakeups_local);
  2076. } else {
  2077. struct sched_domain *sd;
  2078. schedstat_inc(p, se.statistics.nr_wakeups_remote);
  2079. rcu_read_lock();
  2080. for_each_domain(this_cpu, sd) {
  2081. if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
  2082. schedstat_inc(sd, ttwu_wake_remote);
  2083. break;
  2084. }
  2085. }
  2086. rcu_read_unlock();
  2087. }
  2088. if (wake_flags & WF_MIGRATED)
  2089. schedstat_inc(p, se.statistics.nr_wakeups_migrate);
  2090. #endif /* CONFIG_SMP */
  2091. schedstat_inc(rq, ttwu_count);
  2092. schedstat_inc(p, se.statistics.nr_wakeups);
  2093. if (wake_flags & WF_SYNC)
  2094. schedstat_inc(p, se.statistics.nr_wakeups_sync);
  2095. #endif /* CONFIG_SCHEDSTATS */
  2096. }
  2097. static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
  2098. {
  2099. activate_task(rq, p, en_flags);
  2100. p->on_rq = 1;
  2101. /* if a worker is waking up, notify workqueue */
  2102. if (p->flags & PF_WQ_WORKER)
  2103. wq_worker_waking_up(p, cpu_of(rq));
  2104. }
  2105. /*
  2106. * Mark the task runnable and perform wakeup-preemption.
  2107. */
  2108. static void
  2109. ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
  2110. {
  2111. trace_sched_wakeup(p, true);
  2112. check_preempt_curr(rq, p, wake_flags);
  2113. p->state = TASK_RUNNING;
  2114. #ifdef CONFIG_SMP
  2115. if (p->sched_class->task_woken)
  2116. p->sched_class->task_woken(rq, p);
  2117. if (rq->idle_stamp) {
  2118. u64 delta = rq->clock - rq->idle_stamp;
  2119. u64 max = 2*sysctl_sched_migration_cost;
  2120. if (delta > max)
  2121. rq->avg_idle = max;
  2122. else
  2123. update_avg(&rq->avg_idle, delta);
  2124. rq->idle_stamp = 0;
  2125. }
  2126. #endif
  2127. }
  2128. static void
  2129. ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
  2130. {
  2131. #ifdef CONFIG_SMP
  2132. if (p->sched_contributes_to_load)
  2133. rq->nr_uninterruptible--;
  2134. #endif
  2135. ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
  2136. ttwu_do_wakeup(rq, p, wake_flags);
  2137. }
  2138. /*
2139. * Called in case the task @p isn't fully descheduled from its runqueue;
2140. * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2141. * since all we need to do is flip p->state to TASK_RUNNING, because
2142. * the task is still ->on_rq.
  2143. */
  2144. static int ttwu_remote(struct task_struct *p, int wake_flags)
  2145. {
  2146. struct rq *rq;
  2147. int ret = 0;
  2148. rq = __task_rq_lock(p);
  2149. if (p->on_rq) {
  2150. ttwu_do_wakeup(rq, p, wake_flags);
  2151. ret = 1;
  2152. }
  2153. __task_rq_unlock(rq);
  2154. return ret;
  2155. }
  2156. #ifdef CONFIG_SMP
  2157. static void sched_ttwu_do_pending(struct task_struct *list)
  2158. {
  2159. struct rq *rq = this_rq();
  2160. raw_spin_lock(&rq->lock);
  2161. while (list) {
  2162. struct task_struct *p = list;
  2163. list = list->wake_entry;
  2164. ttwu_do_activate(rq, p, 0);
  2165. }
  2166. raw_spin_unlock(&rq->lock);
  2167. }
  2168. #ifdef CONFIG_HOTPLUG_CPU
  2169. static void sched_ttwu_pending(void)
  2170. {
  2171. struct rq *rq = this_rq();
  2172. struct task_struct *list = xchg(&rq->wake_list, NULL);
  2173. if (!list)
  2174. return;
  2175. sched_ttwu_do_pending(list);
  2176. }
  2177. #endif /* CONFIG_HOTPLUG_CPU */
  2178. void scheduler_ipi(void)
  2179. {
  2180. struct rq *rq = this_rq();
  2181. struct task_struct *list = xchg(&rq->wake_list, NULL);
  2182. if (!list)
  2183. return;
  2184. /*
  2185. * Not all reschedule IPI handlers call irq_enter/irq_exit, since
  2186. * traditionally all their work was done from the interrupt return
  2187. * path. Now that we actually do some work, we need to make sure
  2188. * we do call them.
  2189. *
  2190. * Some archs already do call them, luckily irq_enter/exit nest
  2191. * properly.
  2192. *
  2193. * Arguably we should visit all archs and update all handlers,
  2194. * however a fair share of IPIs are still resched only so this would
  2195. * somewhat pessimize the simple resched case.
  2196. */
  2197. irq_enter();
  2198. sched_ttwu_do_pending(list);
  2199. irq_exit();
  2200. }
  2201. static void ttwu_queue_remote(struct task_struct *p, int cpu)
  2202. {
  2203. struct rq *rq = cpu_rq(cpu);
  2204. struct task_struct *next = rq->wake_list;
  2205. for (;;) {
  2206. struct task_struct *old = next;
  2207. p->wake_entry = next;
  2208. next = cmpxchg(&rq->wake_list, old, p);
  2209. if (next == old)
  2210. break;
  2211. }
  2212. if (!next)
  2213. smp_send_reschedule(cpu);
  2214. }
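/*
 * The wake_list above is a lock-free LIFO: producers push with a cmpxchg()
 * loop and only the producer that finds the list empty sends the IPI, while
 * the consumer (scheduler_ipi()/sched_ttwu_do_pending()) detaches the whole
 * list with a single xchg(). A minimal userspace sketch of the same idiom
 * using C11 <stdatomic.h> (node/head/send_ipi() are illustrative names only,
 * not kernel API):
 *
 *	struct node { struct node *next; };
 *	_Atomic(struct node *) head;
 *
 *	void push(struct node *n)
 *	{
 *		struct node *old = atomic_load(&head);
 *		do {
 *			n->next = old;
 *		} while (!atomic_compare_exchange_weak(&head, &old, n));
 *		if (!old)
 *			send_ipi();	// only the empty->non-empty push kicks the CPU
 *	}
 *
 *	struct node *drain(void)
 *	{
 *		return atomic_exchange(&head, NULL);	// consumer takes the whole list
 *	}
 */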
  2215. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  2216. static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
  2217. {
  2218. struct rq *rq;
  2219. int ret = 0;
  2220. rq = __task_rq_lock(p);
  2221. if (p->on_cpu) {
  2222. ttwu_activate(rq, p, ENQUEUE_WAKEUP);
  2223. ttwu_do_wakeup(rq, p, wake_flags);
  2224. ret = 1;
  2225. }
  2226. __task_rq_unlock(rq);
  2227. return ret;
  2228. }
  2229. #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
  2230. #endif /* CONFIG_SMP */
  2231. static void ttwu_queue(struct task_struct *p, int cpu)
  2232. {
  2233. struct rq *rq = cpu_rq(cpu);
  2234. #if defined(CONFIG_SMP)
  2235. if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
  2236. sched_clock_cpu(cpu); /* sync clocks x-cpu */
  2237. ttwu_queue_remote(p, cpu);
  2238. return;
  2239. }
  2240. #endif
  2241. raw_spin_lock(&rq->lock);
  2242. ttwu_do_activate(rq, p, 0);
  2243. raw_spin_unlock(&rq->lock);
  2244. }
  2245. /**
  2246. * try_to_wake_up - wake up a thread
  2247. * @p: the thread to be awakened
  2248. * @state: the mask of task states that can be woken
  2249. * @wake_flags: wake modifier flags (WF_*)
  2250. *
  2251. * Put it on the run-queue if it's not already there. The "current"
  2252. * thread is always on the run-queue (except when the actual
  2253. * re-schedule is in progress), and as such you're allowed to do
  2254. * the simpler "current->state = TASK_RUNNING" to mark yourself
  2255. * runnable without the overhead of this.
  2256. *
  2257. * Returns %true if @p was woken up, %false if it was already running
  2258. * or @state didn't match @p's state.
  2259. */
  2260. static int
  2261. try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  2262. {
  2263. unsigned long flags;
  2264. int cpu, success = 0;
  2265. smp_wmb();
  2266. raw_spin_lock_irqsave(&p->pi_lock, flags);
  2267. if (!(p->state & state))
  2268. goto out;
  2269. success = 1; /* we're going to change ->state */
  2270. cpu = task_cpu(p);
  2271. if (p->on_rq && ttwu_remote(p, wake_flags))
  2272. goto stat;
  2273. #ifdef CONFIG_SMP
  2274. /*
  2275. * If the owning (remote) cpu is still in the middle of schedule() with
2276. * this task as prev, wait until it's done referencing the task.
  2277. */
  2278. while (p->on_cpu) {
  2279. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  2280. /*
  2281. * In case the architecture enables interrupts in
  2282. * context_switch(), we cannot busy wait, since that
  2283. * would lead to deadlocks when an interrupt hits and
  2284. * tries to wake up @prev. So bail and do a complete
  2285. * remote wakeup.
  2286. */
  2287. if (ttwu_activate_remote(p, wake_flags))
  2288. goto stat;
  2289. #else
  2290. cpu_relax();
  2291. #endif
  2292. }
  2293. /*
  2294. * Pairs with the smp_wmb() in finish_lock_switch().
  2295. */
  2296. smp_rmb();
  2297. p->sched_contributes_to_load = !!task_contributes_to_load(p);
  2298. p->state = TASK_WAKING;
  2299. if (p->sched_class->task_waking)
  2300. p->sched_class->task_waking(p);
  2301. cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
  2302. if (task_cpu(p) != cpu) {
  2303. wake_flags |= WF_MIGRATED;
  2304. set_task_cpu(p, cpu);
  2305. }
  2306. #endif /* CONFIG_SMP */
  2307. ttwu_queue(p, cpu);
  2308. stat:
  2309. ttwu_stat(p, cpu, wake_flags);
  2310. out:
  2311. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  2312. return success;
  2313. }
  2314. /**
  2315. * try_to_wake_up_local - try to wake up a local task with rq lock held
  2316. * @p: the thread to be awakened
  2317. *
  2318. * Put @p on the run-queue if it's not already there. The caller must
  2319. * ensure that this_rq() is locked, @p is bound to this_rq() and not
  2320. * the current task.
  2321. */
  2322. static void try_to_wake_up_local(struct task_struct *p)
  2323. {
  2324. struct rq *rq = task_rq(p);
  2325. BUG_ON(rq != this_rq());
  2326. BUG_ON(p == current);
  2327. lockdep_assert_held(&rq->lock);
  2328. if (!raw_spin_trylock(&p->pi_lock)) {
  2329. raw_spin_unlock(&rq->lock);
  2330. raw_spin_lock(&p->pi_lock);
  2331. raw_spin_lock(&rq->lock);
  2332. }
  2333. if (!(p->state & TASK_NORMAL))
  2334. goto out;
  2335. if (!p->on_rq)
  2336. ttwu_activate(rq, p, ENQUEUE_WAKEUP);
  2337. ttwu_do_wakeup(rq, p, 0);
  2338. ttwu_stat(p, smp_processor_id(), 0);
  2339. out:
  2340. raw_spin_unlock(&p->pi_lock);
  2341. }
  2342. /**
  2343. * wake_up_process - Wake up a specific process
  2344. * @p: The process to be woken up.
  2345. *
  2346. * Attempt to wake up the nominated process and move it to the set of runnable
  2347. * processes. Returns 1 if the process was woken up, 0 if it was already
  2348. * running.
  2349. *
  2350. * It may be assumed that this function implies a write memory barrier before
  2351. * changing the task state if and only if any tasks are woken up.
  2352. */
  2353. int wake_up_process(struct task_struct *p)
  2354. {
  2355. return try_to_wake_up(p, TASK_ALL, 0);
  2356. }
  2357. EXPORT_SYMBOL(wake_up_process);
  2358. int wake_up_state(struct task_struct *p, unsigned int state)
  2359. {
  2360. return try_to_wake_up(p, state, 0);
  2361. }
  2362. /*
  2363. * Perform scheduler related setup for a newly forked process p.
  2364. * p is forked by current.
  2365. *
  2366. * __sched_fork() is basic setup used by init_idle() too:
  2367. */
  2368. static void __sched_fork(struct task_struct *p)
  2369. {
  2370. p->on_rq = 0;
  2371. p->se.on_rq = 0;
  2372. p->se.exec_start = 0;
  2373. p->se.sum_exec_runtime = 0;
  2374. p->se.prev_sum_exec_runtime = 0;
  2375. p->se.nr_migrations = 0;
  2376. p->se.vruntime = 0;
  2377. INIT_LIST_HEAD(&p->se.group_node);
  2378. #ifdef CONFIG_SCHEDSTATS
  2379. memset(&p->se.statistics, 0, sizeof(p->se.statistics));
  2380. #endif
  2381. INIT_LIST_HEAD(&p->rt.run_list);
  2382. #ifdef CONFIG_PREEMPT_NOTIFIERS
  2383. INIT_HLIST_HEAD(&p->preempt_notifiers);
  2384. #endif
  2385. }
  2386. /*
  2387. * fork()/clone()-time setup:
  2388. */
  2389. void sched_fork(struct task_struct *p)
  2390. {
  2391. unsigned long flags;
  2392. int cpu = get_cpu();
  2393. __sched_fork(p);
  2394. /*
  2395. * We mark the process as running here. This guarantees that
  2396. * nobody will actually run it, and a signal or other external
  2397. * event cannot wake it up and insert it on the runqueue either.
  2398. */
  2399. p->state = TASK_RUNNING;
  2400. /*
  2401. * Make sure we do not leak PI boosting priority to the child.
  2402. */
  2403. p->prio = current->normal_prio;
  2404. /*
  2405. * Revert to default priority/policy on fork if requested.
  2406. */
  2407. if (unlikely(p->sched_reset_on_fork)) {
  2408. if (task_has_rt_policy(p)) {
  2409. p->policy = SCHED_NORMAL;
  2410. p->static_prio = NICE_TO_PRIO(0);
  2411. p->rt_priority = 0;
  2412. } else if (PRIO_TO_NICE(p->static_prio) < 0)
  2413. p->static_prio = NICE_TO_PRIO(0);
  2414. p->prio = p->normal_prio = __normal_prio(p);
  2415. set_load_weight(p);
  2416. /*
  2417. * We don't need the reset flag anymore after the fork. It has
  2418. * fulfilled its duty:
  2419. */
  2420. p->sched_reset_on_fork = 0;
  2421. }
  2422. if (!rt_prio(p->prio))
  2423. p->sched_class = &fair_sched_class;
  2424. if (p->sched_class->task_fork)
  2425. p->sched_class->task_fork(p);
  2426. /*
  2427. * The child is not yet in the pid-hash so no cgroup attach races,
2428. * and the cgroup is pinned to this child because cgroup_fork()
2429. * runs before sched_fork().
  2430. *
  2431. * Silence PROVE_RCU.
  2432. */
  2433. raw_spin_lock_irqsave(&p->pi_lock, flags);
  2434. set_task_cpu(p, cpu);
  2435. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  2436. #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  2437. if (likely(sched_info_on()))
  2438. memset(&p->sched_info, 0, sizeof(p->sched_info));
  2439. #endif
  2440. #if defined(CONFIG_SMP)
  2441. p->on_cpu = 0;
  2442. #endif
  2443. #ifdef CONFIG_PREEMPT_COUNT
  2444. /* Want to start with kernel preemption disabled. */
  2445. task_thread_info(p)->preempt_count = 1;
  2446. #endif
  2447. #ifdef CONFIG_SMP
  2448. plist_node_init(&p->pushable_tasks, MAX_PRIO);
  2449. #endif
  2450. put_cpu();
  2451. }
  2452. /*
  2453. * wake_up_new_task - wake up a newly created task for the first time.
  2454. *
  2455. * This function will do some initial scheduler statistics housekeeping
  2456. * that must be done for every newly created context, then puts the task
  2457. * on the runqueue and wakes it.
  2458. */
  2459. void wake_up_new_task(struct task_struct *p)
  2460. {
  2461. unsigned long flags;
  2462. struct rq *rq;
  2463. raw_spin_lock_irqsave(&p->pi_lock, flags);
  2464. #ifdef CONFIG_SMP
  2465. /*
  2466. * Fork balancing, do it here and not earlier because:
  2467. * - cpus_allowed can change in the fork path
  2468. * - any previously selected cpu might disappear through hotplug
  2469. */
  2470. set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
  2471. #endif
  2472. rq = __task_rq_lock(p);
  2473. activate_task(rq, p, 0);
  2474. p->on_rq = 1;
  2475. trace_sched_wakeup_new(p, true);
  2476. check_preempt_curr(rq, p, WF_FORK);
  2477. #ifdef CONFIG_SMP
  2478. if (p->sched_class->task_woken)
  2479. p->sched_class->task_woken(rq, p);
  2480. #endif
  2481. task_rq_unlock(rq, p, &flags);
  2482. }
  2483. #ifdef CONFIG_PREEMPT_NOTIFIERS
  2484. /**
  2485. * preempt_notifier_register - tell me when current is being preempted & rescheduled
  2486. * @notifier: notifier struct to register
  2487. */
  2488. void preempt_notifier_register(struct preempt_notifier *notifier)
  2489. {
  2490. hlist_add_head(&notifier->link, &current->preempt_notifiers);
  2491. }
  2492. EXPORT_SYMBOL_GPL(preempt_notifier_register);
  2493. /**
  2494. * preempt_notifier_unregister - no longer interested in preemption notifications
  2495. * @notifier: notifier struct to unregister
  2496. *
  2497. * This is safe to call from within a preemption notifier.
  2498. */
  2499. void preempt_notifier_unregister(struct preempt_notifier *notifier)
  2500. {
  2501. hlist_del(&notifier->link);
  2502. }
  2503. EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
  2504. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  2505. {
  2506. struct preempt_notifier *notifier;
  2507. struct hlist_node *node;
  2508. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  2509. notifier->ops->sched_in(notifier, raw_smp_processor_id());
  2510. }
  2511. static void
  2512. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  2513. struct task_struct *next)
  2514. {
  2515. struct preempt_notifier *notifier;
  2516. struct hlist_node *node;
  2517. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  2518. notifier->ops->sched_out(notifier, next);
  2519. }
  2520. #else /* !CONFIG_PREEMPT_NOTIFIERS */
  2521. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  2522. {
  2523. }
  2524. static void
  2525. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  2526. struct task_struct *next)
  2527. {
  2528. }
  2529. #endif /* CONFIG_PREEMPT_NOTIFIERS */
  2530. /**
  2531. * prepare_task_switch - prepare to switch tasks
  2532. * @rq: the runqueue preparing to switch
  2533. * @prev: the current task that is being switched out
  2534. * @next: the task we are going to switch to.
  2535. *
  2536. * This is called with the rq lock held and interrupts off. It must
  2537. * be paired with a subsequent finish_task_switch after the context
  2538. * switch.
  2539. *
  2540. * prepare_task_switch sets up locking and calls architecture specific
  2541. * hooks.
  2542. */
  2543. static inline void
  2544. prepare_task_switch(struct rq *rq, struct task_struct *prev,
  2545. struct task_struct *next)
  2546. {
  2547. sched_info_switch(prev, next);
  2548. perf_event_task_sched_out(prev, next);
  2549. fire_sched_out_preempt_notifiers(prev, next);
  2550. prepare_lock_switch(rq, next);
  2551. prepare_arch_switch(next);
  2552. trace_sched_switch(prev, next);
  2553. }
  2554. /**
  2555. * finish_task_switch - clean up after a task-switch
  2556. * @rq: runqueue associated with task-switch
  2557. * @prev: the thread we just switched away from.
  2558. *
  2559. * finish_task_switch must be called after the context switch, paired
  2560. * with a prepare_task_switch call before the context switch.
  2561. * finish_task_switch will reconcile locking set up by prepare_task_switch,
  2562. * and do any other architecture-specific cleanup actions.
  2563. *
  2564. * Note that we may have delayed dropping an mm in context_switch(). If
  2565. * so, we finish that here outside of the runqueue lock. (Doing it
  2566. * with the lock held can cause deadlocks; see schedule() for
  2567. * details.)
  2568. */
  2569. static void finish_task_switch(struct rq *rq, struct task_struct *prev)
  2570. __releases(rq->lock)
  2571. {
  2572. struct mm_struct *mm = rq->prev_mm;
  2573. long prev_state;
  2574. rq->prev_mm = NULL;
  2575. /*
  2576. * A task struct has one reference for the use as "current".
  2577. * If a task dies, then it sets TASK_DEAD in tsk->state and calls
  2578. * schedule one last time. The schedule call will never return, and
  2579. * the scheduled task must drop that reference.
  2580. * The test for TASK_DEAD must occur while the runqueue locks are
  2581. * still held, otherwise prev could be scheduled on another cpu, die
  2582. * there before we look at prev->state, and then the reference would
  2583. * be dropped twice.
  2584. * Manfred Spraul <manfred@colorfullife.com>
  2585. */
  2586. prev_state = prev->state;
  2587. finish_arch_switch(prev);
  2588. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  2589. local_irq_disable();
  2590. #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
  2591. perf_event_task_sched_in(current);
  2592. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  2593. local_irq_enable();
  2594. #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
  2595. finish_lock_switch(rq, prev);
  2596. fire_sched_in_preempt_notifiers(current);
  2597. if (mm)
  2598. mmdrop(mm);
  2599. if (unlikely(prev_state == TASK_DEAD)) {
  2600. /*
  2601. * Remove function-return probe instances associated with this
  2602. * task and put them back on the free list.
  2603. */
  2604. kprobe_flush_task(prev);
  2605. put_task_struct(prev);
  2606. }
  2607. }
  2608. #ifdef CONFIG_SMP
  2609. /* assumes rq->lock is held */
  2610. static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
  2611. {
  2612. if (prev->sched_class->pre_schedule)
  2613. prev->sched_class->pre_schedule(rq, prev);
  2614. }
  2615. /* rq->lock is NOT held, but preemption is disabled */
  2616. static inline void post_schedule(struct rq *rq)
  2617. {
  2618. if (rq->post_schedule) {
  2619. unsigned long flags;
  2620. raw_spin_lock_irqsave(&rq->lock, flags);
  2621. if (rq->curr->sched_class->post_schedule)
  2622. rq->curr->sched_class->post_schedule(rq);
  2623. raw_spin_unlock_irqrestore(&rq->lock, flags);
  2624. rq->post_schedule = 0;
  2625. }
  2626. }
  2627. #else
  2628. static inline void pre_schedule(struct rq *rq, struct task_struct *p)
  2629. {
  2630. }
  2631. static inline void post_schedule(struct rq *rq)
  2632. {
  2633. }
  2634. #endif
  2635. /**
  2636. * schedule_tail - first thing a freshly forked thread must call.
  2637. * @prev: the thread we just switched away from.
  2638. */
  2639. asmlinkage void schedule_tail(struct task_struct *prev)
  2640. __releases(rq->lock)
  2641. {
  2642. struct rq *rq = this_rq();
  2643. finish_task_switch(rq, prev);
  2644. /*
  2645. * FIXME: do we need to worry about rq being invalidated by the
  2646. * task_switch?
  2647. */
  2648. post_schedule(rq);
  2649. #ifdef __ARCH_WANT_UNLOCKED_CTXSW
  2650. /* In this case, finish_task_switch does not reenable preemption */
  2651. preempt_enable();
  2652. #endif
  2653. if (current->set_child_tid)
  2654. put_user(task_pid_vnr(current), current->set_child_tid);
  2655. }
  2656. /*
  2657. * context_switch - switch to the new MM and the new
  2658. * thread's register state.
  2659. */
  2660. static inline void
  2661. context_switch(struct rq *rq, struct task_struct *prev,
  2662. struct task_struct *next)
  2663. {
  2664. struct mm_struct *mm, *oldmm;
  2665. prepare_task_switch(rq, prev, next);
  2666. mm = next->mm;
  2667. oldmm = prev->active_mm;
  2668. /*
  2669. * For paravirt, this is coupled with an exit in switch_to to
  2670. * combine the page table reload and the switch backend into
  2671. * one hypercall.
  2672. */
  2673. arch_start_context_switch(prev);
  2674. if (!mm) {
  2675. next->active_mm = oldmm;
  2676. atomic_inc(&oldmm->mm_count);
  2677. enter_lazy_tlb(oldmm, next);
  2678. } else
  2679. switch_mm(oldmm, mm, next);
  2680. if (!prev->mm) {
  2681. prev->active_mm = NULL;
  2682. rq->prev_mm = oldmm;
  2683. }
  2684. /*
2685. * The runqueue lock will be released by the next
2686. * task (which is an invalid locking op, but in the case
2687. * of the scheduler it's an obvious special case), so we
  2688. * do an early lockdep release here:
  2689. */
  2690. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  2691. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  2692. #endif
  2693. /* Here we just switch the register state and the stack. */
  2694. switch_to(prev, next, prev);
  2695. barrier();
  2696. /*
  2697. * this_rq must be evaluated again because prev may have moved
  2698. * CPUs since it called schedule(), thus the 'rq' on its stack
  2699. * frame will be invalid.
  2700. */
  2701. finish_task_switch(this_rq(), prev);
  2702. }
  2703. /*
  2704. * nr_running, nr_uninterruptible and nr_context_switches:
  2705. *
  2706. * externally visible scheduler statistics: current number of runnable
  2707. * threads, current number of uninterruptible-sleeping threads, total
  2708. * number of context switches performed since bootup.
  2709. */
  2710. unsigned long nr_running(void)
  2711. {
  2712. unsigned long i, sum = 0;
  2713. for_each_online_cpu(i)
  2714. sum += cpu_rq(i)->nr_running;
  2715. return sum;
  2716. }
  2717. unsigned long nr_uninterruptible(void)
  2718. {
  2719. unsigned long i, sum = 0;
  2720. for_each_possible_cpu(i)
  2721. sum += cpu_rq(i)->nr_uninterruptible;
  2722. /*
  2723. * Since we read the counters lockless, it might be slightly
  2724. * inaccurate. Do not allow it to go below zero though:
  2725. */
  2726. if (unlikely((long)sum < 0))
  2727. sum = 0;
  2728. return sum;
  2729. }
  2730. unsigned long long nr_context_switches(void)
  2731. {
  2732. int i;
  2733. unsigned long long sum = 0;
  2734. for_each_possible_cpu(i)
  2735. sum += cpu_rq(i)->nr_switches;
  2736. return sum;
  2737. }
  2738. unsigned long nr_iowait(void)
  2739. {
  2740. unsigned long i, sum = 0;
  2741. for_each_possible_cpu(i)
  2742. sum += atomic_read(&cpu_rq(i)->nr_iowait);
  2743. return sum;
  2744. }
  2745. unsigned long nr_iowait_cpu(int cpu)
  2746. {
  2747. struct rq *this = cpu_rq(cpu);
  2748. return atomic_read(&this->nr_iowait);
  2749. }
  2750. unsigned long this_cpu_load(void)
  2751. {
  2752. struct rq *this = this_rq();
  2753. return this->cpu_load[0];
  2754. }
  2755. /* Variables and functions for calc_load */
  2756. static atomic_long_t calc_load_tasks;
  2757. static unsigned long calc_load_update;
  2758. unsigned long avenrun[3];
  2759. EXPORT_SYMBOL(avenrun);
  2760. static long calc_load_fold_active(struct rq *this_rq)
  2761. {
  2762. long nr_active, delta = 0;
  2763. nr_active = this_rq->nr_running;
  2764. nr_active += (long) this_rq->nr_uninterruptible;
  2765. if (nr_active != this_rq->calc_load_active) {
  2766. delta = nr_active - this_rq->calc_load_active;
  2767. this_rq->calc_load_active = nr_active;
  2768. }
  2769. return delta;
  2770. }
  2771. static unsigned long
  2772. calc_load(unsigned long load, unsigned long exp, unsigned long active)
  2773. {
  2774. load *= exp;
  2775. load += active * (FIXED_1 - exp);
  2776. load += 1UL << (FSHIFT - 1);
  2777. return load >> FSHIFT;
  2778. }
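/*
 * Worked example of the update above (illustrative; assumes FSHIFT == 11,
 * so FIXED_1 == 2048, and EXP_1 == 1884 for the 1-minute average):
 *
 *	avenrun[0] == 0, two runnable tasks  =>  active == 2 * FIXED_1 == 4096
 *
 *	load  = 0 * 1884
 *	load += 4096 * (2048 - 1884)	// == 671744
 *	load += 1 << 10			// rounding, total == 672768
 *	load >>= 11			// == 328, i.e. ~0.16
 *
 * so after one LOAD_FREQ (~5 s) interval the 1-minute average has moved
 * (1 - 1884/2048) ~= 8% of the way from 0 toward 2.0, matching 1 - e^(-5/60).
 */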
  2779. #ifdef CONFIG_NO_HZ
  2780. /*
  2781. * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
  2782. *
  2783. * When making the ILB scale, we should try to pull this in as well.
  2784. */
  2785. static atomic_long_t calc_load_tasks_idle;
  2786. static void calc_load_account_idle(struct rq *this_rq)
  2787. {
  2788. long delta;
  2789. delta = calc_load_fold_active(this_rq);
  2790. if (delta)
  2791. atomic_long_add(delta, &calc_load_tasks_idle);
  2792. }
  2793. static long calc_load_fold_idle(void)
  2794. {
  2795. long delta = 0;
  2796. /*
2797. * It's got a race; we don't care...
  2798. */
  2799. if (atomic_long_read(&calc_load_tasks_idle))
  2800. delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
  2801. return delta;
  2802. }
  2803. /**
  2804. * fixed_power_int - compute: x^n, in O(log n) time
  2805. *
  2806. * @x: base of the power
  2807. * @frac_bits: fractional bits of @x
  2808. * @n: power to raise @x to.
  2809. *
  2810. * By exploiting the relation between the definition of the natural power
  2811. * function: x^n := x*x*...*x (x multiplied by itself for n times), and
  2812. * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
  2813. * (where: n_i \elem {0, 1}, the binary vector representing n),
  2814. * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
  2815. * of course trivially computable in O(log_2 n), the length of our binary
  2816. * vector.
  2817. */
  2818. static unsigned long
  2819. fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
  2820. {
  2821. unsigned long result = 1UL << frac_bits;
  2822. if (n) for (;;) {
  2823. if (n & 1) {
  2824. result *= x;
  2825. result += 1UL << (frac_bits - 1);
  2826. result >>= frac_bits;
  2827. }
  2828. n >>= 1;
  2829. if (!n)
  2830. break;
  2831. x *= x;
  2832. x += 1UL << (frac_bits - 1);
  2833. x >>= frac_bits;
  2834. }
  2835. return result;
  2836. }
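/*
 * Example walk-through of the loop above for n == 5 (binary 101), purely
 * illustrative:
 *
 *	n == 5: bit set   -> result *= x   (result == x^1), then x = x*x (x^2)
 *	n == 2: bit clear,                  then x = x*x (x^4)
 *	n == 1: bit set   -> result *= x^4 (result == x^5), n hits 0, done
 *
 * i.e. two multiplies into result plus two squarings instead of four plain
 * multiplies. Numerically, with frac_bits == 11 and x == 1884 (~0.92), the
 * result works out to ~1349/2048 ~= 0.66, which is 0.92^5.
 */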
  2837. /*
  2838. * a1 = a0 * e + a * (1 - e)
  2839. *
  2840. * a2 = a1 * e + a * (1 - e)
  2841. * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
  2842. * = a0 * e^2 + a * (1 - e) * (1 + e)
  2843. *
  2844. * a3 = a2 * e + a * (1 - e)
  2845. * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
  2846. * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
  2847. *
  2848. * ...
  2849. *
2850. * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1)) [1]
  2851. * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
  2852. * = a0 * e^n + a * (1 - e^n)
  2853. *
  2854. * [1] application of the geometric series:
  2855. *
2856. * S_n := \Sum_{i=0}^{n} x^i = (1 - x^(n+1)) / (1 - x)
  2859. */
  2860. static unsigned long
  2861. calc_load_n(unsigned long load, unsigned long exp,
  2862. unsigned long active, unsigned int n)
  2863. {
  2864. return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
  2865. }
  2866. /*
2867. * NO_HZ can leave us missing all the per-cpu ticks that call
2868. * calc_load_account_active(), but since an idle CPU folds its delta into
  2869. * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
  2870. * in the pending idle delta if our idle period crossed a load cycle boundary.
  2871. *
  2872. * Once we've updated the global active value, we need to apply the exponential
  2873. * weights adjusted to the number of cycles missed.
  2874. */
  2875. static void calc_global_nohz(unsigned long ticks)
  2876. {
  2877. long delta, active, n;
  2878. if (time_before(jiffies, calc_load_update))
  2879. return;
  2880. /*
  2881. * If we crossed a calc_load_update boundary, make sure to fold
  2882. * any pending idle changes, the respective CPUs might have
  2883. * missed the tick driven calc_load_account_active() update
  2884. * due to NO_HZ.
  2885. */
  2886. delta = calc_load_fold_idle();
  2887. if (delta)
  2888. atomic_long_add(delta, &calc_load_tasks);
  2889. /*
  2890. * If we were idle for multiple load cycles, apply them.
  2891. */
  2892. if (ticks >= LOAD_FREQ) {
  2893. n = ticks / LOAD_FREQ;
  2894. active = atomic_long_read(&calc_load_tasks);
  2895. active = active > 0 ? active * FIXED_1 : 0;
  2896. avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
  2897. avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
  2898. avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
  2899. calc_load_update += n * LOAD_FREQ;
  2900. }
  2901. /*
2902. * It's possible that the remainder of the above division also crosses
2903. * a LOAD_FREQ period; the regular check in calc_global_load(),
2904. * which comes after this, will take care of that.
  2905. *
  2906. * Consider us being 11 ticks before a cycle completion, and us
  2907. * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
  2908. * age us 4 cycles, and the test in calc_global_load() will
  2909. * pick up the final one.
  2910. */
  2911. }
  2912. #else
  2913. static void calc_load_account_idle(struct rq *this_rq)
  2914. {
  2915. }
  2916. static inline long calc_load_fold_idle(void)
  2917. {
  2918. return 0;
  2919. }
  2920. static void calc_global_nohz(unsigned long ticks)
  2921. {
  2922. }
  2923. #endif
  2924. /**
  2925. * get_avenrun - get the load average array
  2926. * @loads: pointer to dest load array
  2927. * @offset: offset to add
  2928. * @shift: shift count to shift the result left
  2929. *
  2930. * These values are estimates at best, so no need for locking.
  2931. */
  2932. void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  2933. {
  2934. loads[0] = (avenrun[0] + offset) << shift;
  2935. loads[1] = (avenrun[1] + offset) << shift;
  2936. loads[2] = (avenrun[2] + offset) << shift;
  2937. }
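/*
 * Typical consumer, a sketch of what fs/proc/loadavg.c does (the exact code
 * may differ): the offset FIXED_1/200 rounds to two decimal places, and the
 * LOAD_INT()/LOAD_FRAC() helpers from sched.h split the fixed-point value
 * for printing:
 *
 *	unsigned long avnrun[3];
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu ...",
 *		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 *		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 *		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
 */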
  2938. /*
2939. * calc_global_load - update the avenrun load estimates 10 ticks after the
  2940. * CPUs have updated calc_load_tasks.
  2941. */
  2942. void calc_global_load(unsigned long ticks)
  2943. {
  2944. long active;
  2945. calc_global_nohz(ticks);
  2946. if (time_before(jiffies, calc_load_update + 10))
  2947. return;
  2948. active = atomic_long_read(&calc_load_tasks);
  2949. active = active > 0 ? active * FIXED_1 : 0;
  2950. avenrun[0] = calc_load(avenrun[0], EXP_1, active);
  2951. avenrun[1] = calc_load(avenrun[1], EXP_5, active);
  2952. avenrun[2] = calc_load(avenrun[2], EXP_15, active);
  2953. calc_load_update += LOAD_FREQ;
  2954. }
  2955. /*
  2956. * Called from update_cpu_load() to periodically update this CPU's
  2957. * active count.
  2958. */
  2959. static void calc_load_account_active(struct rq *this_rq)
  2960. {
  2961. long delta;
  2962. if (time_before(jiffies, this_rq->calc_load_update))
  2963. return;
  2964. delta = calc_load_fold_active(this_rq);
  2965. delta += calc_load_fold_idle();
  2966. if (delta)
  2967. atomic_long_add(delta, &calc_load_tasks);
  2968. this_rq->calc_load_update += LOAD_FREQ;
  2969. }
  2970. /*
  2971. * The exact cpuload at various idx values, calculated at every tick would be
  2972. * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
  2973. *
  2974. * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
  2975. * on nth tick when cpu may be busy, then we have:
  2976. * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2977. * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
  2978. *
  2979. * decay_load_missed() below does efficient calculation of
  2980. * load = ((2^idx - 1) / 2^idx)^(n-1) * load
  2981. * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
  2982. *
  2983. * The calculation is approximated on a 128 point scale.
  2984. * degrade_zero_ticks is the number of ticks after which load at any
  2985. * particular idx is approximated to be zero.
  2986. * degrade_factor is a precomputed table, a row for each load idx.
  2987. * Each column corresponds to degradation factor for a power of two ticks,
  2988. * based on 128 point scale.
  2989. * Example:
  2990. * row 2, col 3 (=12) says that the degradation at load idx 2 after
  2991. * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
  2992. *
  2993. * With this power of 2 load factors, we can degrade the load n times
  2994. * by looking at 1 bits in n and doing as many mult/shift instead of
  2995. * n mult/shifts needed by the exact degradation.
  2996. */
  2997. #define DEGRADE_SHIFT 7
  2998. static const unsigned char
  2999. degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
  3000. static const unsigned char
  3001. degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
  3002. {0, 0, 0, 0, 0, 0, 0, 0},
  3003. {64, 32, 8, 0, 0, 0, 0, 0},
  3004. {96, 72, 40, 12, 1, 0, 0},
  3005. {112, 98, 75, 43, 15, 1, 0},
  3006. {120, 112, 98, 76, 45, 16, 2} };
  3007. /*
3008. * Update cpu_load for any missed ticks due to tickless idle. The backlog
3009. * arises while the CPU is idle, so we just decay the old load without
3010. * adding any new load.
  3011. */
  3012. static unsigned long
  3013. decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  3014. {
  3015. int j = 0;
  3016. if (!missed_updates)
  3017. return load;
  3018. if (missed_updates >= degrade_zero_ticks[idx])
  3019. return 0;
  3020. if (idx == 1)
  3021. return load >> missed_updates;
  3022. while (missed_updates) {
  3023. if (missed_updates % 2)
  3024. load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
  3025. missed_updates >>= 1;
  3026. j++;
  3027. }
  3028. return load;
  3029. }
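/*
 * Worked example (illustrative): idx == 2, missed_updates == 8 (binary 1000).
 * degrade_zero_ticks[2] == 32, so we don't short-circuit to 0, and the loop
 * only acts on bit 3:
 *
 *	j == 0, 1, 2: bit clear, nothing to do
 *	j == 3:       load = (load * degrade_factor[2][3]) >> 7 == load * 12 / 128
 *
 * which is the "row 2, col 3" case from the table comment above: one
 * multiply/shift instead of decaying by (3/4) eight times in a row.
 */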
  3030. /*
  3031. * Update rq->cpu_load[] statistics. This function is usually called every
  3032. * scheduler tick (TICK_NSEC). With tickless idle this will not be called
  3033. * every tick. We fix it up based on jiffies.
  3034. */
  3035. static void update_cpu_load(struct rq *this_rq)
  3036. {
  3037. unsigned long this_load = this_rq->load.weight;
  3038. unsigned long curr_jiffies = jiffies;
  3039. unsigned long pending_updates;
  3040. int i, scale;
  3041. this_rq->nr_load_updates++;
  3042. /* Avoid repeated calls on same jiffy, when moving in and out of idle */
  3043. if (curr_jiffies == this_rq->last_load_update_tick)
  3044. return;
  3045. pending_updates = curr_jiffies - this_rq->last_load_update_tick;
  3046. this_rq->last_load_update_tick = curr_jiffies;
  3047. /* Update our load: */
  3048. this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
  3049. for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
  3050. unsigned long old_load, new_load;
  3051. /* scale is effectively 1 << i now, and >> i divides by scale */
  3052. old_load = this_rq->cpu_load[i];
  3053. old_load = decay_load_missed(old_load, pending_updates - 1, i);
  3054. new_load = this_load;
  3055. /*
  3056. * Round up the averaging division if load is increasing. This
  3057. * prevents us from getting stuck on 9 if the load is 10, for
  3058. * example.
  3059. */
  3060. if (new_load > old_load)
  3061. new_load += scale - 1;
  3062. this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
  3063. }
  3064. sched_avg_update(this_rq);
  3065. }
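/*
 * Example of the round-up above (illustrative numbers): i == 1, scale == 2,
 * cpu_load[1] == 9 and this_load == 10. Without the round-up the average
 * would stay at (9 * 1 + 10) >> 1 == 9 forever; with new_load bumped to 11
 * we get (9 * 1 + 11) >> 1 == 10, so cpu_load[1] does converge to the real
 * load.
 */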
  3066. static void update_cpu_load_active(struct rq *this_rq)
  3067. {
  3068. update_cpu_load(this_rq);
  3069. calc_load_account_active(this_rq);
  3070. }
  3071. #ifdef CONFIG_SMP
  3072. /*
  3073. * sched_exec - execve() is a valuable balancing opportunity, because at
  3074. * this point the task has the smallest effective memory and cache footprint.
  3075. */
  3076. void sched_exec(void)
  3077. {
  3078. struct task_struct *p = current;
  3079. unsigned long flags;
  3080. int dest_cpu;
  3081. raw_spin_lock_irqsave(&p->pi_lock, flags);
  3082. dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
  3083. if (dest_cpu == smp_processor_id())
  3084. goto unlock;
  3085. if (likely(cpu_active(dest_cpu))) {
  3086. struct migration_arg arg = { p, dest_cpu };
  3087. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3088. stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
  3089. return;
  3090. }
  3091. unlock:
  3092. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3093. }
  3094. #endif
  3095. DEFINE_PER_CPU(struct kernel_stat, kstat);
  3096. EXPORT_PER_CPU_SYMBOL(kstat);
  3097. /*
  3098. * Return any ns on the sched_clock that have not yet been accounted in
  3099. * @p in case that task is currently running.
  3100. *
  3101. * Called with task_rq_lock() held on @rq.
  3102. */
  3103. static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
  3104. {
  3105. u64 ns = 0;
  3106. if (task_current(rq, p)) {
  3107. update_rq_clock(rq);
  3108. ns = rq->clock_task - p->se.exec_start;
  3109. if ((s64)ns < 0)
  3110. ns = 0;
  3111. }
  3112. return ns;
  3113. }
  3114. unsigned long long task_delta_exec(struct task_struct *p)
  3115. {
  3116. unsigned long flags;
  3117. struct rq *rq;
  3118. u64 ns = 0;
  3119. rq = task_rq_lock(p, &flags);
  3120. ns = do_task_delta_exec(p, rq);
  3121. task_rq_unlock(rq, p, &flags);
  3122. return ns;
  3123. }
  3124. /*
  3125. * Return accounted runtime for the task.
  3126. * In case the task is currently running, return the runtime plus current's
3127. * pending runtime that has not been accounted yet.
  3128. */
  3129. unsigned long long task_sched_runtime(struct task_struct *p)
  3130. {
  3131. unsigned long flags;
  3132. struct rq *rq;
  3133. u64 ns = 0;
  3134. rq = task_rq_lock(p, &flags);
  3135. ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
  3136. task_rq_unlock(rq, p, &flags);
  3137. return ns;
  3138. }
  3139. /*
  3140. * Return sum_exec_runtime for the thread group.
  3141. * In case the task is currently running, return the sum plus current's
3142. * pending runtime that has not been accounted yet.
  3143. *
  3144. * Note that the thread group might have other running tasks as well,
3145. * so the return value does not include pending runtime that other
3146. * running tasks might have.
  3147. */
  3148. unsigned long long thread_group_sched_runtime(struct task_struct *p)
  3149. {
  3150. struct task_cputime totals;
  3151. unsigned long flags;
  3152. struct rq *rq;
  3153. u64 ns;
  3154. rq = task_rq_lock(p, &flags);
  3155. thread_group_cputime(p, &totals);
  3156. ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
  3157. task_rq_unlock(rq, p, &flags);
  3158. return ns;
  3159. }
  3160. /*
  3161. * Account user cpu time to a process.
  3162. * @p: the process that the cpu time gets accounted to
  3163. * @cputime: the cpu time spent in user space since the last update
  3164. * @cputime_scaled: cputime scaled by cpu frequency
  3165. */
  3166. void account_user_time(struct task_struct *p, cputime_t cputime,
  3167. cputime_t cputime_scaled)
  3168. {
  3169. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3170. cputime64_t tmp;
  3171. /* Add user time to process. */
  3172. p->utime = cputime_add(p->utime, cputime);
  3173. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  3174. account_group_user_time(p, cputime);
  3175. /* Add user time to cpustat. */
  3176. tmp = cputime_to_cputime64(cputime);
  3177. if (TASK_NICE(p) > 0)
  3178. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  3179. else
  3180. cpustat->user = cputime64_add(cpustat->user, tmp);
  3181. cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
  3182. /* Account for user time used */
  3183. acct_update_integrals(p);
  3184. }
  3185. /*
  3186. * Account guest cpu time to a process.
  3187. * @p: the process that the cpu time gets accounted to
  3188. * @cputime: the cpu time spent in virtual machine since the last update
  3189. * @cputime_scaled: cputime scaled by cpu frequency
  3190. */
  3191. static void account_guest_time(struct task_struct *p, cputime_t cputime,
  3192. cputime_t cputime_scaled)
  3193. {
  3194. cputime64_t tmp;
  3195. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3196. tmp = cputime_to_cputime64(cputime);
  3197. /* Add guest time to process. */
  3198. p->utime = cputime_add(p->utime, cputime);
  3199. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  3200. account_group_user_time(p, cputime);
  3201. p->gtime = cputime_add(p->gtime, cputime);
  3202. /* Add guest time to cpustat. */
  3203. if (TASK_NICE(p) > 0) {
  3204. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  3205. cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
  3206. } else {
  3207. cpustat->user = cputime64_add(cpustat->user, tmp);
  3208. cpustat->guest = cputime64_add(cpustat->guest, tmp);
  3209. }
  3210. }
  3211. /*
  3212. * Account system cpu time to a process and desired cpustat field
  3213. * @p: the process that the cpu time gets accounted to
  3214. * @cputime: the cpu time spent in kernel space since the last update
  3215. * @cputime_scaled: cputime scaled by cpu frequency
  3216. * @target_cputime64: pointer to cpustat field that has to be updated
  3217. */
  3218. static inline
  3219. void __account_system_time(struct task_struct *p, cputime_t cputime,
  3220. cputime_t cputime_scaled, cputime64_t *target_cputime64)
  3221. {
  3222. cputime64_t tmp = cputime_to_cputime64(cputime);
  3223. /* Add system time to process. */
  3224. p->stime = cputime_add(p->stime, cputime);
  3225. p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
  3226. account_group_system_time(p, cputime);
  3227. /* Add system time to cpustat. */
  3228. *target_cputime64 = cputime64_add(*target_cputime64, tmp);
  3229. cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
  3230. /* Account for system time used */
  3231. acct_update_integrals(p);
  3232. }
  3233. /*
  3234. * Account system cpu time to a process.
  3235. * @p: the process that the cpu time gets accounted to
  3236. * @hardirq_offset: the offset to subtract from hardirq_count()
  3237. * @cputime: the cpu time spent in kernel space since the last update
  3238. * @cputime_scaled: cputime scaled by cpu frequency
  3239. */
  3240. void account_system_time(struct task_struct *p, int hardirq_offset,
  3241. cputime_t cputime, cputime_t cputime_scaled)
  3242. {
  3243. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3244. cputime64_t *target_cputime64;
  3245. if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  3246. account_guest_time(p, cputime, cputime_scaled);
  3247. return;
  3248. }
  3249. if (hardirq_count() - hardirq_offset)
  3250. target_cputime64 = &cpustat->irq;
  3251. else if (in_serving_softirq())
  3252. target_cputime64 = &cpustat->softirq;
  3253. else
  3254. target_cputime64 = &cpustat->system;
  3255. __account_system_time(p, cputime, cputime_scaled, target_cputime64);
  3256. }
  3257. /*
  3258. * Account for involuntary wait time.
  3259. * @cputime: the cpu time spent in involuntary wait
  3260. */
  3261. void account_steal_time(cputime_t cputime)
  3262. {
  3263. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3264. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  3265. cpustat->steal = cputime64_add(cpustat->steal, cputime64);
  3266. }
  3267. /*
  3268. * Account for idle time.
  3269. * @cputime: the cpu time spent in idle wait
  3270. */
  3271. void account_idle_time(cputime_t cputime)
  3272. {
  3273. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3274. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  3275. struct rq *rq = this_rq();
  3276. if (atomic_read(&rq->nr_iowait) > 0)
  3277. cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
  3278. else
  3279. cpustat->idle = cputime64_add(cpustat->idle, cputime64);
  3280. }
  3281. static __always_inline bool steal_account_process_tick(void)
  3282. {
  3283. #ifdef CONFIG_PARAVIRT
  3284. if (static_branch(&paravirt_steal_enabled)) {
  3285. u64 steal, st = 0;
  3286. steal = paravirt_steal_clock(smp_processor_id());
  3287. steal -= this_rq()->prev_steal_time;
  3288. st = steal_ticks(steal);
  3289. this_rq()->prev_steal_time += st * TICK_NSEC;
  3290. account_steal_time(st);
  3291. return st;
  3292. }
  3293. #endif
  3294. return false;
  3295. }
  3296. #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  3297. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  3298. /*
  3299. * Account a tick to a process and cpustat
  3300. * @p: the process that the cpu time gets accounted to
  3301. * @user_tick: is the tick from userspace
  3302. * @rq: the pointer to rq
  3303. *
  3304. * Tick demultiplexing follows the order
  3305. * - pending hardirq update
  3306. * - pending softirq update
  3307. * - user_time
  3308. * - idle_time
  3309. * - system time
  3310. * - check for guest_time
  3311. * - else account as system_time
  3312. *
3313. * The check for hardirq is done for both system and user time, as there is
3314. * no timer going off while we are on hardirq and hence we may never get an
3315. * opportunity to update it solely in system time.
3316. * p->stime and friends are only updated on system time and not on irq/
3317. * softirq time, as those no longer count in task exec_runtime.
  3318. */
  3319. static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  3320. struct rq *rq)
  3321. {
  3322. cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
  3323. cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
  3324. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  3325. if (steal_account_process_tick())
  3326. return;
  3327. if (irqtime_account_hi_update()) {
  3328. cpustat->irq = cputime64_add(cpustat->irq, tmp);
  3329. } else if (irqtime_account_si_update()) {
  3330. cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
  3331. } else if (this_cpu_ksoftirqd() == p) {
  3332. /*
3333. * ksoftirqd time does not get accounted in cpu_softirq_time,
3334. * so we have to handle it separately here.
3335. * Also, p->stime needs to be updated for ksoftirqd.
  3336. */
  3337. __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
  3338. &cpustat->softirq);
  3339. } else if (user_tick) {
  3340. account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
  3341. } else if (p == rq->idle) {
  3342. account_idle_time(cputime_one_jiffy);
  3343. } else if (p->flags & PF_VCPU) { /* System time or guest time */
  3344. account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
  3345. } else {
  3346. __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
  3347. &cpustat->system);
  3348. }
  3349. }
  3350. static void irqtime_account_idle_ticks(int ticks)
  3351. {
  3352. int i;
  3353. struct rq *rq = this_rq();
  3354. for (i = 0; i < ticks; i++)
  3355. irqtime_account_process_tick(current, 0, rq);
  3356. }
  3357. #else /* CONFIG_IRQ_TIME_ACCOUNTING */
  3358. static void irqtime_account_idle_ticks(int ticks) {}
  3359. static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  3360. struct rq *rq) {}
  3361. #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
  3362. /*
  3363. * Account a single tick of cpu time.
  3364. * @p: the process that the cpu time gets accounted to
  3365. * @user_tick: indicates if the tick is a user or a system tick
  3366. */
  3367. void account_process_tick(struct task_struct *p, int user_tick)
  3368. {
  3369. cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
  3370. struct rq *rq = this_rq();
  3371. if (sched_clock_irqtime) {
  3372. irqtime_account_process_tick(p, user_tick, rq);
  3373. return;
  3374. }
  3375. if (steal_account_process_tick())
  3376. return;
  3377. if (user_tick)
  3378. account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
  3379. else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
  3380. account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
  3381. one_jiffy_scaled);
  3382. else
  3383. account_idle_time(cputime_one_jiffy);
  3384. }
  3385. /*
  3386. * Account multiple ticks of steal time.
3388. * @ticks: number of stolen ticks
  3389. */
  3390. void account_steal_ticks(unsigned long ticks)
  3391. {
  3392. account_steal_time(jiffies_to_cputime(ticks));
  3393. }
  3394. /*
  3395. * Account multiple ticks of idle time.
3396. * @ticks: number of idle ticks
  3397. */
  3398. void account_idle_ticks(unsigned long ticks)
  3399. {
  3400. if (sched_clock_irqtime) {
  3401. irqtime_account_idle_ticks(ticks);
  3402. return;
  3403. }
  3404. account_idle_time(jiffies_to_cputime(ticks));
  3405. }
  3406. #endif
  3407. /*
  3408. * Use precise platform statistics if available:
  3409. */
  3410. #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  3411. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  3412. {
  3413. *ut = p->utime;
  3414. *st = p->stime;
  3415. }
  3416. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  3417. {
  3418. struct task_cputime cputime;
  3419. thread_group_cputime(p, &cputime);
  3420. *ut = cputime.utime;
  3421. *st = cputime.stime;
  3422. }
  3423. #else
  3424. #ifndef nsecs_to_cputime
  3425. # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
  3426. #endif
  3427. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  3428. {
  3429. cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
  3430. /*
  3431. * Use CFS's precise accounting:
  3432. */
  3433. rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
  3434. if (total) {
  3435. u64 temp = rtime;
  3436. temp *= utime;
  3437. do_div(temp, total);
  3438. utime = (cputime_t)temp;
  3439. } else
  3440. utime = rtime;
  3441. /*
  3442. * Compare with previous values, to keep monotonicity:
  3443. */
  3444. p->prev_utime = max(p->prev_utime, utime);
  3445. p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
  3446. *ut = p->prev_utime;
  3447. *st = p->prev_stime;
  3448. }
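/*
 * Illustration of the scaling above with made-up numbers: p->utime == 30 and
 * p->stime == 10 ticks (total == 40), but CFS says rtime == 60 ticks were
 * actually consumed. Then temp == 60 * 30 / 40 == 45, so we report ut == 45
 * and st == 60 - 45 == 15: the precise runtime is split in the sampled 3:1
 * user/system ratio, and the prev_utime/prev_stime max() keeps both values
 * monotonic across calls.
 */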
  3449. /*
  3450. * Must be called with siglock held.
  3451. */
  3452. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  3453. {
  3454. struct signal_struct *sig = p->signal;
  3455. struct task_cputime cputime;
  3456. cputime_t rtime, utime, total;
  3457. thread_group_cputime(p, &cputime);
  3458. total = cputime_add(cputime.utime, cputime.stime);
  3459. rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
  3460. if (total) {
  3461. u64 temp = rtime;
  3462. temp *= cputime.utime;
  3463. do_div(temp, total);
  3464. utime = (cputime_t)temp;
  3465. } else
  3466. utime = rtime;
  3467. sig->prev_utime = max(sig->prev_utime, utime);
  3468. sig->prev_stime = max(sig->prev_stime,
  3469. cputime_sub(rtime, sig->prev_utime));
  3470. *ut = sig->prev_utime;
  3471. *st = sig->prev_stime;
  3472. }
  3473. #endif
  3474. /*
  3475. * This function gets called by the timer code, with HZ frequency.
  3476. * We call it with interrupts disabled.
  3477. */
  3478. void scheduler_tick(void)
  3479. {
  3480. int cpu = smp_processor_id();
  3481. struct rq *rq = cpu_rq(cpu);
  3482. struct task_struct *curr = rq->curr;
  3483. sched_clock_tick();
  3484. raw_spin_lock(&rq->lock);
  3485. update_rq_clock(rq);
  3486. update_cpu_load_active(rq);
  3487. curr->sched_class->task_tick(rq, curr, 0);
  3488. raw_spin_unlock(&rq->lock);
  3489. perf_event_task_tick();
  3490. #ifdef CONFIG_SMP
  3491. rq->idle_at_tick = idle_cpu(cpu);
  3492. trigger_load_balance(rq, cpu);
  3493. #endif
  3494. }
  3495. notrace unsigned long get_parent_ip(unsigned long addr)
  3496. {
  3497. if (in_lock_functions(addr)) {
  3498. addr = CALLER_ADDR2;
  3499. if (in_lock_functions(addr))
  3500. addr = CALLER_ADDR3;
  3501. }
  3502. return addr;
  3503. }
  3504. #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
  3505. defined(CONFIG_PREEMPT_TRACER))
  3506. void __kprobes add_preempt_count(int val)
  3507. {
  3508. #ifdef CONFIG_DEBUG_PREEMPT
  3509. /*
  3510. * Underflow?
  3511. */
  3512. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  3513. return;
  3514. #endif
  3515. preempt_count() += val;
  3516. #ifdef CONFIG_DEBUG_PREEMPT
  3517. /*
  3518. * Spinlock count overflowing soon?
  3519. */
  3520. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
  3521. PREEMPT_MASK - 10);
  3522. #endif
  3523. if (preempt_count() == val)
  3524. trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  3525. }
  3526. EXPORT_SYMBOL(add_preempt_count);
  3527. void __kprobes sub_preempt_count(int val)
  3528. {
  3529. #ifdef CONFIG_DEBUG_PREEMPT
  3530. /*
  3531. * Underflow?
  3532. */
  3533. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  3534. return;
  3535. /*
  3536. * Is the spinlock portion underflowing?
  3537. */
  3538. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  3539. !(preempt_count() & PREEMPT_MASK)))
  3540. return;
  3541. #endif
  3542. if (preempt_count() == val)
  3543. trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  3544. preempt_count() -= val;
  3545. }
  3546. EXPORT_SYMBOL(sub_preempt_count);
  3547. #endif
  3548. /*
  3549. * Print scheduling while atomic bug:
  3550. */
  3551. static noinline void __schedule_bug(struct task_struct *prev)
  3552. {
  3553. struct pt_regs *regs = get_irq_regs();
  3554. printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
  3555. prev->comm, prev->pid, preempt_count());
  3556. debug_show_held_locks(prev);
  3557. print_modules();
  3558. if (irqs_disabled())
  3559. print_irqtrace_events(prev);
  3560. if (regs)
  3561. show_regs(regs);
  3562. else
  3563. dump_stack();
  3564. }
  3565. /*
  3566. * Various schedule()-time debugging checks and statistics:
  3567. */
  3568. static inline void schedule_debug(struct task_struct *prev)
  3569. {
  3570. /*
  3571. * Test if we are atomic. Since do_exit() needs to call into
  3572. * schedule() atomically, we ignore that path for now.
  3573. * Otherwise, whine if we are scheduling when we should not be.
  3574. */
  3575. if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
  3576. __schedule_bug(prev);
  3577. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  3578. schedstat_inc(this_rq(), sched_count);
  3579. }
  3580. static void put_prev_task(struct rq *rq, struct task_struct *prev)
  3581. {
  3582. if (prev->on_rq || rq->skip_clock_update < 0)
  3583. update_rq_clock(rq);
  3584. prev->sched_class->put_prev_task(rq, prev);
  3585. }
  3586. /*
  3587. * Pick up the highest-prio task:
  3588. */
  3589. static inline struct task_struct *
  3590. pick_next_task(struct rq *rq)
  3591. {
  3592. const struct sched_class *class;
  3593. struct task_struct *p;
  3594. /*
  3595. * Optimization: we know that if all tasks are in
  3596. * the fair class we can call that function directly:
  3597. */
  3598. if (likely(rq->nr_running == rq->cfs.nr_running)) {
  3599. p = fair_sched_class.pick_next_task(rq);
  3600. if (likely(p))
  3601. return p;
  3602. }
  3603. for_each_class(class) {
  3604. p = class->pick_next_task(rq);
  3605. if (p)
  3606. return p;
  3607. }
  3608. BUG(); /* the idle class will always have a runnable task */
  3609. }
  3610. /*
  3611. * schedule() is the main scheduler function.
  3612. */
  3613. asmlinkage void __sched schedule(void)
  3614. {
  3615. struct task_struct *prev, *next;
  3616. unsigned long *switch_count;
  3617. struct rq *rq;
  3618. int cpu;
  3619. need_resched:
  3620. preempt_disable();
  3621. cpu = smp_processor_id();
  3622. rq = cpu_rq(cpu);
  3623. rcu_note_context_switch(cpu);
  3624. prev = rq->curr;
  3625. schedule_debug(prev);
  3626. if (sched_feat(HRTICK))
  3627. hrtick_clear(rq);
  3628. raw_spin_lock_irq(&rq->lock);
  3629. switch_count = &prev->nivcsw;
  3630. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  3631. if (unlikely(signal_pending_state(prev->state, prev))) {
  3632. prev->state = TASK_RUNNING;
  3633. } else {
  3634. deactivate_task(rq, prev, DEQUEUE_SLEEP);
  3635. prev->on_rq = 0;
  3636. /*
  3637. * If a worker went to sleep, notify and ask workqueue
  3638. * whether it wants to wake up a task to maintain
  3639. * concurrency.
  3640. */
  3641. if (prev->flags & PF_WQ_WORKER) {
  3642. struct task_struct *to_wakeup;
  3643. to_wakeup = wq_worker_sleeping(prev, cpu);
  3644. if (to_wakeup)
  3645. try_to_wake_up_local(to_wakeup);
  3646. }
  3647. /*
  3648. * If we are going to sleep and we have plugged IO
  3649. * queued, make sure to submit it to avoid deadlocks.
  3650. */
  3651. if (blk_needs_flush_plug(prev)) {
  3652. raw_spin_unlock(&rq->lock);
  3653. blk_schedule_flush_plug(prev);
  3654. raw_spin_lock(&rq->lock);
  3655. }
  3656. }
  3657. switch_count = &prev->nvcsw;
  3658. }
  3659. pre_schedule(rq, prev);
  3660. if (unlikely(!rq->nr_running))
  3661. idle_balance(cpu, rq);
  3662. put_prev_task(rq, prev);
  3663. next = pick_next_task(rq);
  3664. clear_tsk_need_resched(prev);
  3665. rq->skip_clock_update = 0;
  3666. if (likely(prev != next)) {
  3667. rq->nr_switches++;
  3668. rq->curr = next;
  3669. ++*switch_count;
  3670. context_switch(rq, prev, next); /* unlocks the rq */
  3671. /*
3672. * The context switch has flipped the stack from under us
3673. * and restored the local variables which were saved when
3674. * this task called schedule() in the past. prev == current
3675. * is still correct, but the task may have moved to another cpu/rq.
  3676. */
  3677. cpu = smp_processor_id();
  3678. rq = cpu_rq(cpu);
  3679. } else
  3680. raw_spin_unlock_irq(&rq->lock);
  3681. post_schedule(rq);
  3682. preempt_enable_no_resched();
  3683. if (need_resched())
  3684. goto need_resched;
  3685. }
  3686. EXPORT_SYMBOL(schedule);
  3687. #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  3688. static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
  3689. {
  3690. if (lock->owner != owner)
  3691. return false;
  3692. /*
3693. * Ensure we emit the owner->on_cpu dereference _after_ checking that
3694. * lock->owner still matches owner. If that check fails, owner might
3695. * point to free()d memory; if it still matches, the rcu_read_lock()
3696. * ensures the memory stays valid.
  3697. */
  3698. barrier();
  3699. return owner->on_cpu;
  3700. }
  3701. /*
  3702. * Look out! "owner" is an entirely speculative pointer
  3703. * access and not reliable.
  3704. */
  3705. int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  3706. {
  3707. if (!sched_feat(OWNER_SPIN))
  3708. return 0;
  3709. rcu_read_lock();
  3710. while (owner_running(lock, owner)) {
  3711. if (need_resched())
  3712. break;
  3713. arch_mutex_cpu_relax();
  3714. }
  3715. rcu_read_unlock();
  3716. /*
3717. * We break out of the loop above on need_resched() and when the
3718. * owner changes, which is a sign of heavy contention. Return
3719. * success only when lock->owner is NULL.
  3720. */
  3721. return lock->owner == NULL;
  3722. }
  3723. #endif
  3724. #ifdef CONFIG_PREEMPT
  3725. /*
3726. * This is the entry point to schedule() for in-kernel preemption
3727. * off of preempt_enable(). Kernel preemptions off of the return-from-interrupt
3728. * path occur there and call schedule() directly.
  3729. */
  3730. asmlinkage void __sched notrace preempt_schedule(void)
  3731. {
  3732. struct thread_info *ti = current_thread_info();
  3733. /*
  3734. * If there is a non-zero preempt_count or interrupts are disabled,
  3735. * we do not want to preempt the current task. Just return..
  3736. */
  3737. if (likely(ti->preempt_count || irqs_disabled()))
  3738. return;
  3739. do {
  3740. add_preempt_count_notrace(PREEMPT_ACTIVE);
  3741. schedule();
  3742. sub_preempt_count_notrace(PREEMPT_ACTIVE);
  3743. /*
  3744. * Check again in case we missed a preemption opportunity
  3745. * between schedule and now.
  3746. */
  3747. barrier();
  3748. } while (need_resched());
  3749. }
  3750. EXPORT_SYMBOL(preempt_schedule);
  3751. /*
3752. * This is the entry point to schedule() for kernel preemption
3753. * off of irq context.
3754. * Note that this is called and returns with irqs disabled. This
3755. * protects us against recursive calls from irq context.
  3756. */
  3757. asmlinkage void __sched preempt_schedule_irq(void)
  3758. {
  3759. struct thread_info *ti = current_thread_info();
  3760. /* Catch callers which need to be fixed */
  3761. BUG_ON(ti->preempt_count || !irqs_disabled());
  3762. do {
  3763. add_preempt_count(PREEMPT_ACTIVE);
  3764. local_irq_enable();
  3765. schedule();
  3766. local_irq_disable();
  3767. sub_preempt_count(PREEMPT_ACTIVE);
  3768. /*
  3769. * Check again in case we missed a preemption opportunity
  3770. * between schedule and now.
  3771. */
  3772. barrier();
  3773. } while (need_resched());
  3774. }
  3775. #endif /* CONFIG_PREEMPT */
  3776. int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
  3777. void *key)
  3778. {
  3779. return try_to_wake_up(curr->private, mode, wake_flags);
  3780. }
  3781. EXPORT_SYMBOL(default_wake_function);
  3782. /*
  3783. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  3784. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  3785. * number) then we wake all the non-exclusive tasks and one exclusive task.
  3786. *
  3787. * There are circumstances in which we can try to wake a task which has already
  3788. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  3789. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  3790. */
  3791. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  3792. int nr_exclusive, int wake_flags, void *key)
  3793. {
  3794. wait_queue_t *curr, *next;
  3795. list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
  3796. unsigned flags = curr->flags;
  3797. if (curr->func(curr, mode, wake_flags, key) &&
  3798. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  3799. break;
  3800. }
  3801. }
  3802. /**
  3803. * __wake_up - wake up threads blocked on a waitqueue.
  3804. * @q: the waitqueue
  3805. * @mode: which threads
  3806. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3807. * @key: is directly passed to the wakeup function
  3808. *
  3809. * It may be assumed that this function implies a write memory barrier before
  3810. * changing the task state if and only if any tasks are woken up.
  3811. */
  3812. void __wake_up(wait_queue_head_t *q, unsigned int mode,
  3813. int nr_exclusive, void *key)
  3814. {
  3815. unsigned long flags;
  3816. spin_lock_irqsave(&q->lock, flags);
  3817. __wake_up_common(q, mode, nr_exclusive, 0, key);
  3818. spin_unlock_irqrestore(&q->lock, flags);
  3819. }
  3820. EXPORT_SYMBOL(__wake_up);
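/*
 * Illustrative usage sketch of the waitqueue API that __wake_up() serves;
 * my_wq, my_flag and my_thread_fn are assumed names, not part of this file.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		// consumer: sleeps until my_flag becomes non-zero
 *		wait_event_interruptible(my_wq, my_flag != 0);
 *		return 0;
 *	}
 *
 *	static void my_signal(void)
 *	{
 *		// producer: set the condition, then wake the queue
 *		my_flag = 1;
 *		wake_up(&my_wq);	// expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL)
 *	}
 */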
  3821. /*
  3822. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  3823. */
  3824. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  3825. {
  3826. __wake_up_common(q, mode, 1, 0, NULL);
  3827. }
  3828. EXPORT_SYMBOL_GPL(__wake_up_locked);
  3829. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  3830. {
  3831. __wake_up_common(q, mode, 1, 0, key);
  3832. }
  3833. EXPORT_SYMBOL_GPL(__wake_up_locked_key);
  3834. /**
  3835. * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  3836. * @q: the waitqueue
  3837. * @mode: which threads
  3838. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3839. * @key: opaque value to be passed to wakeup targets
  3840. *
  3841. * The sync wakeup differs that the waker knows that it will schedule
  3842. * away soon, so while the target thread will be woken up, it will not
  3843. * be migrated to another CPU - ie. the two threads are 'synchronized'
  3844. * with each other. This can prevent needless bouncing between CPUs.
  3845. *
  3846. * On UP it can prevent extra preemption.
  3847. *
  3848. * It may be assumed that this function implies a write memory barrier before
  3849. * changing the task state if and only if any tasks are woken up.
  3850. */
  3851. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
  3852. int nr_exclusive, void *key)
  3853. {
  3854. unsigned long flags;
  3855. int wake_flags = WF_SYNC;
  3856. if (unlikely(!q))
  3857. return;
  3858. if (unlikely(!nr_exclusive))
  3859. wake_flags = 0;
  3860. spin_lock_irqsave(&q->lock, flags);
  3861. __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
  3862. spin_unlock_irqrestore(&q->lock, flags);
  3863. }
  3864. EXPORT_SYMBOL_GPL(__wake_up_sync_key);
  3865. /*
  3866. * __wake_up_sync - see __wake_up_sync_key()
  3867. */
  3868. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  3869. {
  3870. __wake_up_sync_key(q, mode, nr_exclusive, NULL);
  3871. }
  3872. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  3873. /**
  3874. * complete: - signals a single thread waiting on this completion
  3875. * @x: holds the state of this particular completion
  3876. *
  3877. * This will wake up a single thread waiting on this completion. Threads will be
  3878. * awakened in the same order in which they were queued.
  3879. *
  3880. * See also complete_all(), wait_for_completion() and related routines.
  3881. *
  3882. * It may be assumed that this function implies a write memory barrier before
  3883. * changing the task state if and only if any tasks are woken up.
  3884. */
  3885. void complete(struct completion *x)
  3886. {
  3887. unsigned long flags;
  3888. spin_lock_irqsave(&x->wait.lock, flags);
  3889. x->done++;
  3890. __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
  3891. spin_unlock_irqrestore(&x->wait.lock, flags);
  3892. }
  3893. EXPORT_SYMBOL(complete);
  3894. /**
  3895. * complete_all: - signals all threads waiting on this completion
  3896. * @x: holds the state of this particular completion
  3897. *
  3898. * This will wake up all threads waiting on this particular completion event.
  3899. *
  3900. * It may be assumed that this function implies a write memory barrier before
  3901. * changing the task state if and only if any tasks are woken up.
  3902. */
  3903. void complete_all(struct completion *x)
  3904. {
  3905. unsigned long flags;
  3906. spin_lock_irqsave(&x->wait.lock, flags);
  3907. x->done += UINT_MAX/2;
  3908. __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
  3909. spin_unlock_irqrestore(&x->wait.lock, flags);
  3910. }
  3911. EXPORT_SYMBOL(complete_all);
  3912. static inline long __sched
  3913. do_wait_for_common(struct completion *x, long timeout, int state)
  3914. {
  3915. if (!x->done) {
  3916. DECLARE_WAITQUEUE(wait, current);
  3917. __add_wait_queue_tail_exclusive(&x->wait, &wait);
  3918. do {
  3919. if (signal_pending_state(state, current)) {
  3920. timeout = -ERESTARTSYS;
  3921. break;
  3922. }
  3923. __set_current_state(state);
  3924. spin_unlock_irq(&x->wait.lock);
  3925. timeout = schedule_timeout(timeout);
  3926. spin_lock_irq(&x->wait.lock);
  3927. } while (!x->done && timeout);
  3928. __remove_wait_queue(&x->wait, &wait);
  3929. if (!x->done)
  3930. return timeout;
  3931. }
  3932. x->done--;
  3933. return timeout ?: 1;
  3934. }
  3935. static long __sched
  3936. wait_for_common(struct completion *x, long timeout, int state)
  3937. {
  3938. might_sleep();
  3939. spin_lock_irq(&x->wait.lock);
  3940. timeout = do_wait_for_common(x, timeout, state);
  3941. spin_unlock_irq(&x->wait.lock);
  3942. return timeout;
  3943. }
  3944. /**
  3945. * wait_for_completion: - waits for completion of a task
  3946. * @x: holds the state of this particular completion
  3947. *
  3948. * This waits to be signaled for completion of a specific task. It is NOT
  3949. * interruptible and there is no timeout.
  3950. *
  3951. * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
  3952. * and interrupt capability. Also see complete().
  3953. */
  3954. void __sched wait_for_completion(struct completion *x)
  3955. {
  3956. wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
  3957. }
  3958. EXPORT_SYMBOL(wait_for_completion);
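/*
 * Illustrative completion usage sketch (assumed names, not part of this
 * file): one context announces an event with complete(), another blocks on
 * it with wait_for_completion().
 *
 *	struct my_ctx {
 *		struct completion done;
 *	};
 *
 *	static void my_start(struct my_ctx *ctx)
 *	{
 *		init_completion(&ctx->done);
 *		// ... kick off asynchronous work that will call my_finish() ...
 *	}
 *
 *	static void my_finish(struct my_ctx *ctx)	// e.g. from an irq handler
 *	{
 *		complete(&ctx->done);			// wakes exactly one waiter
 *	}
 *
 *	static void my_wait(struct my_ctx *ctx)
 *	{
 *		wait_for_completion(&ctx->done);	// uninterruptible, no timeout
 *	}
 */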
  3959. /**
  3960. * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  3961. * @x: holds the state of this particular completion
  3962. * @timeout: timeout value in jiffies
  3963. *
  3964. * This waits for either a completion of a specific task to be signaled or for a
  3965. * specified timeout to expire. The timeout is in jiffies. It is not
  3966. * interruptible.
  3967. */
  3968. unsigned long __sched
  3969. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  3970. {
  3971. return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
  3972. }
  3973. EXPORT_SYMBOL(wait_for_completion_timeout);
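/*
 * Illustrative only: wait_for_completion_timeout() returns 0 if the timeout
 * expired, otherwise the number of jiffies left; ctx is an assumed name.
 *
 *	unsigned long left;
 *
 *	left = wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(100));
 *	if (!left)
 *		pr_warn("device did not respond within 100ms\n");
 */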
  3974. /**
  3975. * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  3976. * @x: holds the state of this particular completion
  3977. *
  3978. * This waits for completion of a specific task to be signaled. It is
  3979. * interruptible.
  3980. */
  3981. int __sched wait_for_completion_interruptible(struct completion *x)
  3982. {
  3983. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
  3984. if (t == -ERESTARTSYS)
  3985. return t;
  3986. return 0;
  3987. }
  3988. EXPORT_SYMBOL(wait_for_completion_interruptible);
  3989. /**
  3990. * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
  3991. * @x: holds the state of this particular completion
  3992. * @timeout: timeout value in jiffies
  3993. *
  3994. * This waits for either a completion of a specific task to be signaled or for a
  3995. * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  3996. */
  3997. long __sched
  3998. wait_for_completion_interruptible_timeout(struct completion *x,
  3999. unsigned long timeout)
  4000. {
  4001. return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
  4002. }
  4003. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  4004. /**
  4005. * wait_for_completion_killable: - waits for completion of a task (killable)
  4006. * @x: holds the state of this particular completion
  4007. *
  4008. * This waits to be signaled for completion of a specific task. It can be
  4009. * interrupted by a kill signal.
  4010. */
  4011. int __sched wait_for_completion_killable(struct completion *x)
  4012. {
  4013. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
  4014. if (t == -ERESTARTSYS)
  4015. return t;
  4016. return 0;
  4017. }
  4018. EXPORT_SYMBOL(wait_for_completion_killable);
  4019. /**
  4020. * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
  4021. * @x: holds the state of this particular completion
  4022. * @timeout: timeout value in jiffies
  4023. *
  4024. * This waits for either a completion of a specific task to be
  4025. * signaled or for a specified timeout to expire. It can be
  4026. * interrupted by a kill signal. The timeout is in jiffies.
  4027. */
  4028. long __sched
  4029. wait_for_completion_killable_timeout(struct completion *x,
  4030. unsigned long timeout)
  4031. {
  4032. return wait_for_common(x, timeout, TASK_KILLABLE);
  4033. }
  4034. EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  4035. /**
  4036. * try_wait_for_completion - try to decrement a completion without blocking
  4037. * @x: completion structure
  4038. *
  4039. * Returns: 0 if a decrement cannot be done without blocking
  4040. * 1 if a decrement succeeded.
  4041. *
  4042. * If a completion is being used as a counting completion,
  4043. * attempt to decrement the counter without blocking. This
  4044. * enables us to avoid waiting if the resource the completion
  4045. * is protecting is not available.
  4046. */
  4047. bool try_wait_for_completion(struct completion *x)
  4048. {
  4049. unsigned long flags;
  4050. int ret = 1;
  4051. spin_lock_irqsave(&x->wait.lock, flags);
  4052. if (!x->done)
  4053. ret = 0;
  4054. else
  4055. x->done--;
  4056. spin_unlock_irqrestore(&x->wait.lock, flags);
  4057. return ret;
  4058. }
  4059. EXPORT_SYMBOL(try_wait_for_completion);
  4060. /**
  4061. * completion_done - Test to see if a completion has any waiters
  4062. * @x: completion structure
  4063. *
  4064. * Returns: 0 if there are waiters (wait_for_completion() in progress)
  4065. * 1 if there are no waiters.
  4066. *
  4067. */
  4068. bool completion_done(struct completion *x)
  4069. {
  4070. unsigned long flags;
  4071. int ret = 1;
  4072. spin_lock_irqsave(&x->wait.lock, flags);
  4073. if (!x->done)
  4074. ret = 0;
  4075. spin_unlock_irqrestore(&x->wait.lock, flags);
  4076. return ret;
  4077. }
  4078. EXPORT_SYMBOL(completion_done);
  4079. static long __sched
  4080. sleep_on_common(wait_queue_head_t *q, int state, long timeout)
  4081. {
  4082. unsigned long flags;
  4083. wait_queue_t wait;
  4084. init_waitqueue_entry(&wait, current);
  4085. __set_current_state(state);
  4086. spin_lock_irqsave(&q->lock, flags);
  4087. __add_wait_queue(q, &wait);
  4088. spin_unlock(&q->lock);
  4089. timeout = schedule_timeout(timeout);
  4090. spin_lock_irq(&q->lock);
  4091. __remove_wait_queue(q, &wait);
  4092. spin_unlock_irqrestore(&q->lock, flags);
  4093. return timeout;
  4094. }
  4095. void __sched interruptible_sleep_on(wait_queue_head_t *q)
  4096. {
  4097. sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  4098. }
  4099. EXPORT_SYMBOL(interruptible_sleep_on);
  4100. long __sched
  4101. interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
  4102. {
  4103. return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
  4104. }
  4105. EXPORT_SYMBOL(interruptible_sleep_on_timeout);
  4106. void __sched sleep_on(wait_queue_head_t *q)
  4107. {
  4108. sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  4109. }
  4110. EXPORT_SYMBOL(sleep_on);
  4111. long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  4112. {
  4113. return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
  4114. }
  4115. EXPORT_SYMBOL(sleep_on_timeout);
  4116. #ifdef CONFIG_RT_MUTEXES
  4117. /*
  4118. * rt_mutex_setprio - set the current priority of a task
  4119. * @p: task
  4120. * @prio: prio value (kernel-internal form)
  4121. *
  4122. * This function changes the 'effective' priority of a task. It does
  4123. * not touch ->normal_prio like __setscheduler().
  4124. *
  4125. * Used by the rt_mutex code to implement priority inheritance logic.
  4126. */
  4127. void rt_mutex_setprio(struct task_struct *p, int prio)
  4128. {
  4129. int oldprio, on_rq, running;
  4130. struct rq *rq;
  4131. const struct sched_class *prev_class;
  4132. BUG_ON(prio < 0 || prio > MAX_PRIO);
  4133. rq = __task_rq_lock(p);
  4134. trace_sched_pi_setprio(p, prio);
  4135. oldprio = p->prio;
  4136. prev_class = p->sched_class;
  4137. on_rq = p->on_rq;
  4138. running = task_current(rq, p);
  4139. if (on_rq)
  4140. dequeue_task(rq, p, 0);
  4141. if (running)
  4142. p->sched_class->put_prev_task(rq, p);
  4143. if (rt_prio(prio))
  4144. p->sched_class = &rt_sched_class;
  4145. else
  4146. p->sched_class = &fair_sched_class;
  4147. p->prio = prio;
  4148. if (running)
  4149. p->sched_class->set_curr_task(rq);
  4150. if (on_rq)
  4151. enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
  4152. check_class_changed(rq, p, prev_class, oldprio);
  4153. __task_rq_unlock(rq);
  4154. }
  4155. #endif
  4156. void set_user_nice(struct task_struct *p, long nice)
  4157. {
  4158. int old_prio, delta, on_rq;
  4159. unsigned long flags;
  4160. struct rq *rq;
  4161. if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  4162. return;
  4163. /*
  4164. * We have to be careful, if called from sys_setpriority(),
  4165. * the task might be in the middle of scheduling on another CPU.
  4166. */
  4167. rq = task_rq_lock(p, &flags);
  4168. /*
  4169. * The RT priorities are set via sched_setscheduler(), but we still
  4170. * allow the 'normal' nice value to be set - but as expected
4171. * it won't have any effect on scheduling while the task remains
4172. * SCHED_FIFO/SCHED_RR:
  4173. */
  4174. if (task_has_rt_policy(p)) {
  4175. p->static_prio = NICE_TO_PRIO(nice);
  4176. goto out_unlock;
  4177. }
  4178. on_rq = p->on_rq;
  4179. if (on_rq)
  4180. dequeue_task(rq, p, 0);
  4181. p->static_prio = NICE_TO_PRIO(nice);
  4182. set_load_weight(p);
  4183. old_prio = p->prio;
  4184. p->prio = effective_prio(p);
  4185. delta = p->prio - old_prio;
  4186. if (on_rq) {
  4187. enqueue_task(rq, p, 0);
  4188. /*
  4189. * If the task increased its priority or is running and
  4190. * lowered its priority, then reschedule its CPU:
  4191. */
  4192. if (delta < 0 || (delta > 0 && task_running(rq, p)))
  4193. resched_task(rq->curr);
  4194. }
  4195. out_unlock:
  4196. task_rq_unlock(rq, p, &flags);
  4197. }
  4198. EXPORT_SYMBOL(set_user_nice);
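/*
 * Illustrative in-kernel use of set_user_nice(); the kthread shown here is
 * hypothetical. Lowering the nice value of a helper thread gives it more
 * CPU weight under CFS.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(my_worker_fn, NULL, "my_worker");
 *	if (!IS_ERR(tsk))
 *		set_user_nice(tsk, -5);		// boost: nice -5
 */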
  4199. /*
  4200. * can_nice - check if a task can reduce its nice value
  4201. * @p: task
  4202. * @nice: nice value
  4203. */
  4204. int can_nice(const struct task_struct *p, const int nice)
  4205. {
  4206. /* convert nice value [19,-20] to rlimit style value [1,40] */
  4207. int nice_rlim = 20 - nice;
  4208. return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
  4209. capable(CAP_SYS_NICE));
  4210. }
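/*
 * Worked examples of the nice -> RLIMIT_NICE mapping used above (20 - nice),
 * so the most permissive limit is 40 and the least is 1:
 *
 *	nice  19  ->  nice_rlim  1	(lowest priority, always allowed)
 *	nice   0  ->  nice_rlim 20
 *	nice -20  ->  nice_rlim 40	(highest priority, needs RLIMIT_NICE >= 40
 *					 or CAP_SYS_NICE)
 */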
  4211. #ifdef __ARCH_WANT_SYS_NICE
  4212. /*
  4213. * sys_nice - change the priority of the current process.
  4214. * @increment: priority increment
  4215. *
  4216. * sys_setpriority is a more generic, but much slower function that
  4217. * does similar things.
  4218. */
  4219. SYSCALL_DEFINE1(nice, int, increment)
  4220. {
  4221. long nice, retval;
  4222. /*
  4223. * Setpriority might change our priority at the same moment.
  4224. * We don't have to worry. Conceptually one call occurs first
  4225. * and we have a single winner.
  4226. */
  4227. if (increment < -40)
  4228. increment = -40;
  4229. if (increment > 40)
  4230. increment = 40;
  4231. nice = TASK_NICE(current) + increment;
  4232. if (nice < -20)
  4233. nice = -20;
  4234. if (nice > 19)
  4235. nice = 19;
  4236. if (increment < 0 && !can_nice(current, nice))
  4237. return -EPERM;
  4238. retval = security_task_setnice(current, nice);
  4239. if (retval)
  4240. return retval;
  4241. set_user_nice(current, nice);
  4242. return 0;
  4243. }
  4244. #endif
  4245. /**
  4246. * task_prio - return the priority value of a given task.
  4247. * @p: the task in question.
  4248. *
  4249. * This is the priority value as seen by users in /proc.
4250. * RT tasks map to the range [-100, -1]; normal tasks map their
4251. * nice value [-20, 19] onto [0, 39].
  4252. */
  4253. int task_prio(const struct task_struct *p)
  4254. {
  4255. return p->prio - MAX_RT_PRIO;
  4256. }
  4257. /**
  4258. * task_nice - return the nice value of a given task.
  4259. * @p: the task in question.
  4260. */
  4261. int task_nice(const struct task_struct *p)
  4262. {
  4263. return TASK_NICE(p);
  4264. }
  4265. EXPORT_SYMBOL(task_nice);
  4266. /**
  4267. * idle_cpu - is a given cpu idle currently?
  4268. * @cpu: the processor in question.
  4269. */
  4270. int idle_cpu(int cpu)
  4271. {
  4272. return cpu_curr(cpu) == cpu_rq(cpu)->idle;
  4273. }
  4274. /**
  4275. * idle_task - return the idle task for a given cpu.
  4276. * @cpu: the processor in question.
  4277. */
  4278. struct task_struct *idle_task(int cpu)
  4279. {
  4280. return cpu_rq(cpu)->idle;
  4281. }
  4282. /**
  4283. * find_process_by_pid - find a process with a matching PID value.
  4284. * @pid: the pid in question.
  4285. */
  4286. static struct task_struct *find_process_by_pid(pid_t pid)
  4287. {
  4288. return pid ? find_task_by_vpid(pid) : current;
  4289. }
  4290. /* Actually do priority change: must hold rq lock. */
  4291. static void
  4292. __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
  4293. {
  4294. p->policy = policy;
  4295. p->rt_priority = prio;
  4296. p->normal_prio = normal_prio(p);
  4297. /* we are holding p->pi_lock already */
  4298. p->prio = rt_mutex_getprio(p);
  4299. if (rt_prio(p->prio))
  4300. p->sched_class = &rt_sched_class;
  4301. else
  4302. p->sched_class = &fair_sched_class;
  4303. set_load_weight(p);
  4304. }
  4305. /*
  4306. * check the target process has a UID that matches the current process's
  4307. */
  4308. static bool check_same_owner(struct task_struct *p)
  4309. {
  4310. const struct cred *cred = current_cred(), *pcred;
  4311. bool match;
  4312. rcu_read_lock();
  4313. pcred = __task_cred(p);
  4314. if (cred->user->user_ns == pcred->user->user_ns)
  4315. match = (cred->euid == pcred->euid ||
  4316. cred->euid == pcred->uid);
  4317. else
  4318. match = false;
  4319. rcu_read_unlock();
  4320. return match;
  4321. }
  4322. static int __sched_setscheduler(struct task_struct *p, int policy,
  4323. const struct sched_param *param, bool user)
  4324. {
  4325. int retval, oldprio, oldpolicy = -1, on_rq, running;
  4326. unsigned long flags;
  4327. const struct sched_class *prev_class;
  4328. struct rq *rq;
  4329. int reset_on_fork;
  4330. /* may grab non-irq protected spin_locks */
  4331. BUG_ON(in_interrupt());
  4332. recheck:
  4333. /* double check policy once rq lock held */
  4334. if (policy < 0) {
  4335. reset_on_fork = p->sched_reset_on_fork;
  4336. policy = oldpolicy = p->policy;
  4337. } else {
  4338. reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
  4339. policy &= ~SCHED_RESET_ON_FORK;
  4340. if (policy != SCHED_FIFO && policy != SCHED_RR &&
  4341. policy != SCHED_NORMAL && policy != SCHED_BATCH &&
  4342. policy != SCHED_IDLE)
  4343. return -EINVAL;
  4344. }
  4345. /*
  4346. * Valid priorities for SCHED_FIFO and SCHED_RR are
  4347. * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
  4348. * SCHED_BATCH and SCHED_IDLE is 0.
  4349. */
  4350. if (param->sched_priority < 0 ||
  4351. (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
  4352. (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
  4353. return -EINVAL;
  4354. if (rt_policy(policy) != (param->sched_priority != 0))
  4355. return -EINVAL;
  4356. /*
  4357. * Allow unprivileged RT tasks to decrease priority:
  4358. */
  4359. if (user && !capable(CAP_SYS_NICE)) {
  4360. if (rt_policy(policy)) {
  4361. unsigned long rlim_rtprio =
  4362. task_rlimit(p, RLIMIT_RTPRIO);
  4363. /* can't set/change the rt policy */
  4364. if (policy != p->policy && !rlim_rtprio)
  4365. return -EPERM;
  4366. /* can't increase priority */
  4367. if (param->sched_priority > p->rt_priority &&
  4368. param->sched_priority > rlim_rtprio)
  4369. return -EPERM;
  4370. }
  4371. /*
  4372. * Treat SCHED_IDLE as nice 20. Only allow a switch to
  4373. * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
  4374. */
  4375. if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
  4376. if (!can_nice(p, TASK_NICE(p)))
  4377. return -EPERM;
  4378. }
  4379. /* can't change other user's priorities */
  4380. if (!check_same_owner(p))
  4381. return -EPERM;
  4382. /* Normal users shall not reset the sched_reset_on_fork flag */
  4383. if (p->sched_reset_on_fork && !reset_on_fork)
  4384. return -EPERM;
  4385. }
  4386. if (user) {
  4387. retval = security_task_setscheduler(p);
  4388. if (retval)
  4389. return retval;
  4390. }
  4391. /*
  4392. * make sure no PI-waiters arrive (or leave) while we are
  4393. * changing the priority of the task:
  4394. *
  4395. * To be able to change p->policy safely, the appropriate
  4396. * runqueue lock must be held.
  4397. */
  4398. rq = task_rq_lock(p, &flags);
  4399. /*
4400. * Changing the policy of the stop threads is a very bad idea
  4401. */
  4402. if (p == rq->stop) {
  4403. task_rq_unlock(rq, p, &flags);
  4404. return -EINVAL;
  4405. }
  4406. /*
  4407. * If not changing anything there's no need to proceed further:
  4408. */
  4409. if (unlikely(policy == p->policy && (!rt_policy(policy) ||
  4410. param->sched_priority == p->rt_priority))) {
  4411. __task_rq_unlock(rq);
  4412. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  4413. return 0;
  4414. }
  4415. #ifdef CONFIG_RT_GROUP_SCHED
  4416. if (user) {
  4417. /*
  4418. * Do not allow realtime tasks into groups that have no runtime
  4419. * assigned.
  4420. */
  4421. if (rt_bandwidth_enabled() && rt_policy(policy) &&
  4422. task_group(p)->rt_bandwidth.rt_runtime == 0 &&
  4423. !task_group_is_autogroup(task_group(p))) {
  4424. task_rq_unlock(rq, p, &flags);
  4425. return -EPERM;
  4426. }
  4427. }
  4428. #endif
  4429. /* recheck policy now with rq lock held */
  4430. if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
  4431. policy = oldpolicy = -1;
  4432. task_rq_unlock(rq, p, &flags);
  4433. goto recheck;
  4434. }
  4435. on_rq = p->on_rq;
  4436. running = task_current(rq, p);
  4437. if (on_rq)
  4438. deactivate_task(rq, p, 0);
  4439. if (running)
  4440. p->sched_class->put_prev_task(rq, p);
  4441. p->sched_reset_on_fork = reset_on_fork;
  4442. oldprio = p->prio;
  4443. prev_class = p->sched_class;
  4444. __setscheduler(rq, p, policy, param->sched_priority);
  4445. if (running)
  4446. p->sched_class->set_curr_task(rq);
  4447. if (on_rq)
  4448. activate_task(rq, p, 0);
  4449. check_class_changed(rq, p, prev_class, oldprio);
  4450. task_rq_unlock(rq, p, &flags);
  4451. rt_mutex_adjust_pi(p);
  4452. return 0;
  4453. }
  4454. /**
  4455. * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  4456. * @p: the task in question.
  4457. * @policy: new policy.
  4458. * @param: structure containing the new RT priority.
  4459. *
  4460. * NOTE that the task may be already dead.
  4461. */
  4462. int sched_setscheduler(struct task_struct *p, int policy,
  4463. const struct sched_param *param)
  4464. {
  4465. return __sched_setscheduler(p, policy, param, true);
  4466. }
  4467. EXPORT_SYMBOL_GPL(sched_setscheduler);
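/*
 * Illustrative kernel-side caller of sched_setscheduler(); the task pointer
 * and the priority value are assumptions, not taken from this file.
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	int err;
 *
 *	err = sched_setscheduler(tsk, SCHED_FIFO, &sp);	// 1..MAX_USER_RT_PRIO-1
 *	if (err)
 *		pr_err("failed to make task RT: %d\n", err);
 */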
  4468. /**
  4469. * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
  4470. * @p: the task in question.
  4471. * @policy: new policy.
  4472. * @param: structure containing the new RT priority.
  4473. *
  4474. * Just like sched_setscheduler, only don't bother checking if the
  4475. * current context has permission. For example, this is needed in
  4476. * stop_machine(): we create temporary high priority worker threads,
  4477. * but our caller might not have that capability.
  4478. */
  4479. int sched_setscheduler_nocheck(struct task_struct *p, int policy,
  4480. const struct sched_param *param)
  4481. {
  4482. return __sched_setscheduler(p, policy, param, false);
  4483. }
  4484. static int
  4485. do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  4486. {
  4487. struct sched_param lparam;
  4488. struct task_struct *p;
  4489. int retval;
  4490. if (!param || pid < 0)
  4491. return -EINVAL;
  4492. if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
  4493. return -EFAULT;
  4494. rcu_read_lock();
  4495. retval = -ESRCH;
  4496. p = find_process_by_pid(pid);
  4497. if (p != NULL)
  4498. retval = sched_setscheduler(p, policy, &lparam);
  4499. rcu_read_unlock();
  4500. return retval;
  4501. }
  4502. /**
  4503. * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  4504. * @pid: the pid in question.
  4505. * @policy: new policy.
  4506. * @param: structure containing the new RT priority.
  4507. */
  4508. SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  4509. struct sched_param __user *, param)
  4510. {
  4511. /* negative values for policy are not valid */
  4512. if (policy < 0)
  4513. return -EINVAL;
  4514. return do_sched_setscheduler(pid, policy, param);
  4515. }
  4516. /**
  4517. * sys_sched_setparam - set/change the RT priority of a thread
  4518. * @pid: the pid in question.
  4519. * @param: structure containing the new RT priority.
  4520. */
  4521. SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  4522. {
  4523. return do_sched_setscheduler(pid, -1, param);
  4524. }
  4525. /**
  4526. * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  4527. * @pid: the pid in question.
  4528. */
  4529. SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  4530. {
  4531. struct task_struct *p;
  4532. int retval;
  4533. if (pid < 0)
  4534. return -EINVAL;
  4535. retval = -ESRCH;
  4536. rcu_read_lock();
  4537. p = find_process_by_pid(pid);
  4538. if (p) {
  4539. retval = security_task_getscheduler(p);
  4540. if (!retval)
  4541. retval = p->policy
  4542. | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
  4543. }
  4544. rcu_read_unlock();
  4545. return retval;
  4546. }
  4547. /**
  4548. * sys_sched_getparam - get the RT priority of a thread
  4549. * @pid: the pid in question.
  4550. * @param: structure containing the RT priority.
  4551. */
  4552. SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
  4553. {
  4554. struct sched_param lp;
  4555. struct task_struct *p;
  4556. int retval;
  4557. if (!param || pid < 0)
  4558. return -EINVAL;
  4559. rcu_read_lock();
  4560. p = find_process_by_pid(pid);
  4561. retval = -ESRCH;
  4562. if (!p)
  4563. goto out_unlock;
  4564. retval = security_task_getscheduler(p);
  4565. if (retval)
  4566. goto out_unlock;
  4567. lp.sched_priority = p->rt_priority;
  4568. rcu_read_unlock();
  4569. /*
  4570. * This one might sleep, we cannot do it with a spinlock held ...
  4571. */
  4572. retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
  4573. return retval;
  4574. out_unlock:
  4575. rcu_read_unlock();
  4576. return retval;
  4577. }
  4578. long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
  4579. {
  4580. cpumask_var_t cpus_allowed, new_mask;
  4581. struct task_struct *p;
  4582. int retval;
  4583. get_online_cpus();
  4584. rcu_read_lock();
  4585. p = find_process_by_pid(pid);
  4586. if (!p) {
  4587. rcu_read_unlock();
  4588. put_online_cpus();
  4589. return -ESRCH;
  4590. }
  4591. /* Prevent p going away */
  4592. get_task_struct(p);
  4593. rcu_read_unlock();
  4594. if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
  4595. retval = -ENOMEM;
  4596. goto out_put_task;
  4597. }
  4598. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
  4599. retval = -ENOMEM;
  4600. goto out_free_cpus_allowed;
  4601. }
  4602. retval = -EPERM;
  4603. if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
  4604. goto out_unlock;
  4605. retval = security_task_setscheduler(p);
  4606. if (retval)
  4607. goto out_unlock;
  4608. cpuset_cpus_allowed(p, cpus_allowed);
  4609. cpumask_and(new_mask, in_mask, cpus_allowed);
  4610. again:
  4611. retval = set_cpus_allowed_ptr(p, new_mask);
  4612. if (!retval) {
  4613. cpuset_cpus_allowed(p, cpus_allowed);
  4614. if (!cpumask_subset(new_mask, cpus_allowed)) {
  4615. /*
  4616. * We must have raced with a concurrent cpuset
  4617. * update. Just reset the cpus_allowed to the
  4618. * cpuset's cpus_allowed
  4619. */
  4620. cpumask_copy(new_mask, cpus_allowed);
  4621. goto again;
  4622. }
  4623. }
  4624. out_unlock:
  4625. free_cpumask_var(new_mask);
  4626. out_free_cpus_allowed:
  4627. free_cpumask_var(cpus_allowed);
  4628. out_put_task:
  4629. put_task_struct(p);
  4630. put_online_cpus();
  4631. return retval;
  4632. }
  4633. static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  4634. struct cpumask *new_mask)
  4635. {
  4636. if (len < cpumask_size())
  4637. cpumask_clear(new_mask);
  4638. else if (len > cpumask_size())
  4639. len = cpumask_size();
  4640. return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
  4641. }
  4642. /**
  4643. * sys_sched_setaffinity - set the cpu affinity of a process
  4644. * @pid: pid of the process
  4645. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  4646. * @user_mask_ptr: user-space pointer to the new cpu mask
  4647. */
  4648. SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
  4649. unsigned long __user *, user_mask_ptr)
  4650. {
  4651. cpumask_var_t new_mask;
  4652. int retval;
  4653. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
  4654. return -ENOMEM;
  4655. retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
  4656. if (retval == 0)
  4657. retval = sched_setaffinity(pid, new_mask);
  4658. free_cpumask_var(new_mask);
  4659. return retval;
  4660. }
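/*
 * The userspace view of this syscall, via the glibc wrapper; illustrative
 * only. Pinning the calling thread to CPU 2:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))	// 0 == current thread
 *		perror("sched_setaffinity");
 */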
  4661. long sched_getaffinity(pid_t pid, struct cpumask *mask)
  4662. {
  4663. struct task_struct *p;
  4664. unsigned long flags;
  4665. int retval;
  4666. get_online_cpus();
  4667. rcu_read_lock();
  4668. retval = -ESRCH;
  4669. p = find_process_by_pid(pid);
  4670. if (!p)
  4671. goto out_unlock;
  4672. retval = security_task_getscheduler(p);
  4673. if (retval)
  4674. goto out_unlock;
  4675. raw_spin_lock_irqsave(&p->pi_lock, flags);
  4676. cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
  4677. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  4678. out_unlock:
  4679. rcu_read_unlock();
  4680. put_online_cpus();
  4681. return retval;
  4682. }
  4683. /**
  4684. * sys_sched_getaffinity - get the cpu affinity of a process
  4685. * @pid: pid of the process
  4686. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  4687. * @user_mask_ptr: user-space pointer to hold the current cpu mask
  4688. */
  4689. SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  4690. unsigned long __user *, user_mask_ptr)
  4691. {
  4692. int ret;
  4693. cpumask_var_t mask;
  4694. if ((len * BITS_PER_BYTE) < nr_cpu_ids)
  4695. return -EINVAL;
  4696. if (len & (sizeof(unsigned long)-1))
  4697. return -EINVAL;
  4698. if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  4699. return -ENOMEM;
  4700. ret = sched_getaffinity(pid, mask);
  4701. if (ret == 0) {
  4702. size_t retlen = min_t(size_t, len, cpumask_size());
  4703. if (copy_to_user(user_mask_ptr, mask, retlen))
  4704. ret = -EFAULT;
  4705. else
  4706. ret = retlen;
  4707. }
  4708. free_cpumask_var(mask);
  4709. return ret;
  4710. }
  4711. /**
  4712. * sys_sched_yield - yield the current processor to other threads.
  4713. *
  4714. * This function yields the current CPU to other tasks. If there are no
  4715. * other threads running on this CPU then this function will return.
  4716. */
  4717. SYSCALL_DEFINE0(sched_yield)
  4718. {
  4719. struct rq *rq = this_rq_lock();
  4720. schedstat_inc(rq, yld_count);
  4721. current->sched_class->yield_task(rq);
  4722. /*
  4723. * Since we are going to call schedule() anyway, there's
  4724. * no need to preempt or enable interrupts:
  4725. */
  4726. __release(rq->lock);
  4727. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  4728. do_raw_spin_unlock(&rq->lock);
  4729. preempt_enable_no_resched();
  4730. schedule();
  4731. return 0;
  4732. }
  4733. static inline int should_resched(void)
  4734. {
  4735. return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
  4736. }
  4737. static void __cond_resched(void)
  4738. {
  4739. add_preempt_count(PREEMPT_ACTIVE);
  4740. schedule();
  4741. sub_preempt_count(PREEMPT_ACTIVE);
  4742. }
  4743. int __sched _cond_resched(void)
  4744. {
  4745. if (should_resched()) {
  4746. __cond_resched();
  4747. return 1;
  4748. }
  4749. return 0;
  4750. }
  4751. EXPORT_SYMBOL(_cond_resched);
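/*
 * Typical use of the cond_resched() wrapper around _cond_resched() in a
 * long-running loop; the loop body below is hypothetical.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		cond_resched();	// give other tasks a chance on !PREEMPT kernels
 *	}
 */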
  4752. /*
  4753. * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  4754. * call schedule, and on return reacquire the lock.
  4755. *
  4756. * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  4757. * operations here to prevent schedule() from being called twice (once via
  4758. * spin_unlock(), once by hand).
  4759. */
  4760. int __cond_resched_lock(spinlock_t *lock)
  4761. {
  4762. int resched = should_resched();
  4763. int ret = 0;
  4764. lockdep_assert_held(lock);
  4765. if (spin_needbreak(lock) || resched) {
  4766. spin_unlock(lock);
  4767. if (resched)
  4768. __cond_resched();
  4769. else
  4770. cpu_relax();
  4771. ret = 1;
  4772. spin_lock(lock);
  4773. }
  4774. return ret;
  4775. }
  4776. EXPORT_SYMBOL(__cond_resched_lock);
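/*
 * Illustrative use of the cond_resched_lock() wrapper while tearing down a
 * long list under a spinlock (my_lock, my_list, struct my_item and handle()
 * are assumed names). Entries are unlinked before the lock may be dropped.
 *
 *	spin_lock(&my_lock);
 *	while (!list_empty(&my_list)) {
 *		pos = list_first_entry(&my_list, struct my_item, node);
 *		list_del(&pos->node);
 *		handle(pos);
 *		cond_resched_lock(&my_lock);	// may drop and reacquire my_lock
 *	}
 *	spin_unlock(&my_lock);
 */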
  4777. int __sched __cond_resched_softirq(void)
  4778. {
  4779. BUG_ON(!in_softirq());
  4780. if (should_resched()) {
  4781. local_bh_enable();
  4782. __cond_resched();
  4783. local_bh_disable();
  4784. return 1;
  4785. }
  4786. return 0;
  4787. }
  4788. EXPORT_SYMBOL(__cond_resched_softirq);
  4789. /**
  4790. * yield - yield the current processor to other threads.
  4791. *
  4792. * This is a shortcut for kernel-space yielding - it marks the
  4793. * thread runnable and calls sys_sched_yield().
  4794. */
  4795. void __sched yield(void)
  4796. {
  4797. set_current_state(TASK_RUNNING);
  4798. sys_sched_yield();
  4799. }
  4800. EXPORT_SYMBOL(yield);
  4801. /**
  4802. * yield_to - yield the current processor to another thread in
  4803. * your thread group, or accelerate that thread toward the
  4804. * processor it's on.
  4805. * @p: target task
  4806. * @preempt: whether task preemption is allowed or not
  4807. *
  4808. * It's the caller's job to ensure that the target task struct
  4809. * can't go away on us before we can do any checks.
  4810. *
  4811. * Returns true if we indeed boosted the target task.
  4812. */
  4813. bool __sched yield_to(struct task_struct *p, bool preempt)
  4814. {
  4815. struct task_struct *curr = current;
  4816. struct rq *rq, *p_rq;
  4817. unsigned long flags;
  4818. bool yielded = 0;
  4819. local_irq_save(flags);
  4820. rq = this_rq();
  4821. again:
  4822. p_rq = task_rq(p);
  4823. double_rq_lock(rq, p_rq);
  4824. while (task_rq(p) != p_rq) {
  4825. double_rq_unlock(rq, p_rq);
  4826. goto again;
  4827. }
  4828. if (!curr->sched_class->yield_to_task)
  4829. goto out;
  4830. if (curr->sched_class != p->sched_class)
  4831. goto out;
  4832. if (task_running(p_rq, p) || p->state)
  4833. goto out;
  4834. yielded = curr->sched_class->yield_to_task(rq, p, preempt);
  4835. if (yielded) {
  4836. schedstat_inc(rq, yld_count);
  4837. /*
  4838. * Make p's CPU reschedule; pick_next_entity takes care of
  4839. * fairness.
  4840. */
  4841. if (preempt && rq != p_rq)
  4842. resched_task(p_rq->curr);
  4843. }
  4844. out:
  4845. double_rq_unlock(rq, p_rq);
  4846. local_irq_restore(flags);
  4847. if (yielded)
  4848. schedule();
  4849. return yielded;
  4850. }
  4851. EXPORT_SYMBOL_GPL(yield_to);
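/*
 * A hedged usage sketch for yield_to(), in the spirit of a virtualization
 * host donating cycles to the vCPU task believed to hold a guest spinlock.
 * "target" and how its reference is obtained are assumptions.
 *
 *	get_task_struct(target);	// per the comment above: keep target alive
 *	if (yield_to(target, true))
 *		;	// we boosted target and already went through schedule()
 *	put_task_struct(target);
 */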
  4852. /*
  4853. * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  4854. * that process accounting knows that this is a task in IO wait state.
  4855. */
  4856. void __sched io_schedule(void)
  4857. {
  4858. struct rq *rq = raw_rq();
  4859. delayacct_blkio_start();
  4860. atomic_inc(&rq->nr_iowait);
  4861. blk_flush_plug(current);
  4862. current->in_iowait = 1;
  4863. schedule();
  4864. current->in_iowait = 0;
  4865. atomic_dec(&rq->nr_iowait);
  4866. delayacct_blkio_end();
  4867. }
  4868. EXPORT_SYMBOL(io_schedule);
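/*
 * An illustrative sleep-for-I/O pattern around io_schedule(); the wait queue
 * and condition are hypothetical. Using io_schedule() instead of schedule()
 * here lets iowait accounting and delay accounting see the task as waiting
 * on I/O.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&req->waitq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (req->completed)
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&req->waitq, &wait);
 */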
  4869. long __sched io_schedule_timeout(long timeout)
  4870. {
  4871. struct rq *rq = raw_rq();
  4872. long ret;
  4873. delayacct_blkio_start();
  4874. atomic_inc(&rq->nr_iowait);
  4875. blk_flush_plug(current);
  4876. current->in_iowait = 1;
  4877. ret = schedule_timeout(timeout);
  4878. current->in_iowait = 0;
  4879. atomic_dec(&rq->nr_iowait);
  4880. delayacct_blkio_end();
  4881. return ret;
  4882. }
  4883. /**
  4884. * sys_sched_get_priority_max - return maximum RT priority.
  4885. * @policy: scheduling class.
  4886. *
  4887. * this syscall returns the maximum rt_priority that can be used
  4888. * by a given scheduling class.
  4889. */
  4890. SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  4891. {
  4892. int ret = -EINVAL;
  4893. switch (policy) {
  4894. case SCHED_FIFO:
  4895. case SCHED_RR:
  4896. ret = MAX_USER_RT_PRIO-1;
  4897. break;
  4898. case SCHED_NORMAL:
  4899. case SCHED_BATCH:
  4900. case SCHED_IDLE:
  4901. ret = 0;
  4902. break;
  4903. }
  4904. return ret;
  4905. }
  4906. /**
  4907. * sys_sched_get_priority_min - return minimum RT priority.
  4908. * @policy: scheduling class.
  4909. *
  4910. * this syscall returns the minimum rt_priority that can be used
  4911. * by a given scheduling class.
  4912. */
  4913. SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  4914. {
  4915. int ret = -EINVAL;
  4916. switch (policy) {
  4917. case SCHED_FIFO:
  4918. case SCHED_RR:
  4919. ret = 1;
  4920. break;
  4921. case SCHED_NORMAL:
  4922. case SCHED_BATCH:
  4923. case SCHED_IDLE:
  4924. ret = 0;
  4925. }
  4926. return ret;
  4927. }
  4928. /**
  4929. * sys_sched_rr_get_interval - return the default timeslice of a process.
  4930. * @pid: pid of the process.
  4931. * @interval: userspace pointer to the timeslice value.
  4932. *
  4933. * this syscall writes the default timeslice value of a given process
  4934. * into the user-space timespec buffer. A value of '0' means infinity.
  4935. */
  4936. SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
  4937. struct timespec __user *, interval)
  4938. {
  4939. struct task_struct *p;
  4940. unsigned int time_slice;
  4941. unsigned long flags;
  4942. struct rq *rq;
  4943. int retval;
  4944. struct timespec t;
  4945. if (pid < 0)
  4946. return -EINVAL;
  4947. retval = -ESRCH;
  4948. rcu_read_lock();
  4949. p = find_process_by_pid(pid);
  4950. if (!p)
  4951. goto out_unlock;
  4952. retval = security_task_getscheduler(p);
  4953. if (retval)
  4954. goto out_unlock;
  4955. rq = task_rq_lock(p, &flags);
  4956. time_slice = p->sched_class->get_rr_interval(rq, p);
  4957. task_rq_unlock(rq, p, &flags);
  4958. rcu_read_unlock();
  4959. jiffies_to_timespec(time_slice, &t);
  4960. retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  4961. return retval;
  4962. out_unlock:
  4963. rcu_read_unlock();
  4964. return retval;
  4965. }
  4966. static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
  4967. void sched_show_task(struct task_struct *p)
  4968. {
  4969. unsigned long free = 0;
  4970. unsigned state;
  4971. state = p->state ? __ffs(p->state) + 1 : 0;
  4972. printk(KERN_INFO "%-15.15s %c", p->comm,
  4973. state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  4974. #if BITS_PER_LONG == 32
  4975. if (state == TASK_RUNNING)
  4976. printk(KERN_CONT " running ");
  4977. else
  4978. printk(KERN_CONT " %08lx ", thread_saved_pc(p));
  4979. #else
  4980. if (state == TASK_RUNNING)
  4981. printk(KERN_CONT " running task ");
  4982. else
  4983. printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  4984. #endif
  4985. #ifdef CONFIG_DEBUG_STACK_USAGE
  4986. free = stack_not_used(p);
  4987. #endif
  4988. printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
  4989. task_pid_nr(p), task_pid_nr(p->real_parent),
  4990. (unsigned long)task_thread_info(p)->flags);
  4991. show_stack(p, NULL);
  4992. }
  4993. void show_state_filter(unsigned long state_filter)
  4994. {
  4995. struct task_struct *g, *p;
  4996. #if BITS_PER_LONG == 32
  4997. printk(KERN_INFO
  4998. " task PC stack pid father\n");
  4999. #else
  5000. printk(KERN_INFO
  5001. " task PC stack pid father\n");
  5002. #endif
  5003. read_lock(&tasklist_lock);
  5004. do_each_thread(g, p) {
  5005. /*
5006. * reset the NMI-timeout; listing all tasks on a slow
5007. * console might take a lot of time:
  5008. */
  5009. touch_nmi_watchdog();
  5010. if (!state_filter || (p->state & state_filter))
  5011. sched_show_task(p);
  5012. } while_each_thread(g, p);
  5013. touch_all_softlockup_watchdogs();
  5014. #ifdef CONFIG_SCHED_DEBUG
  5015. sysrq_sched_debug_show();
  5016. #endif
  5017. read_unlock(&tasklist_lock);
  5018. /*
  5019. * Only show locks if all tasks are dumped:
  5020. */
  5021. if (!state_filter)
  5022. debug_show_all_locks();
  5023. }
  5024. void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  5025. {
  5026. idle->sched_class = &idle_sched_class;
  5027. }
  5028. /**
  5029. * init_idle - set up an idle thread for a given CPU
  5030. * @idle: task in question
  5031. * @cpu: cpu the idle task belongs to
  5032. *
  5033. * NOTE: this function does not set the idle thread's NEED_RESCHED
  5034. * flag, to make booting more robust.
  5035. */
  5036. void __cpuinit init_idle(struct task_struct *idle, int cpu)
  5037. {
  5038. struct rq *rq = cpu_rq(cpu);
  5039. unsigned long flags;
  5040. raw_spin_lock_irqsave(&rq->lock, flags);
  5041. __sched_fork(idle);
  5042. idle->state = TASK_RUNNING;
  5043. idle->se.exec_start = sched_clock();
  5044. do_set_cpus_allowed(idle, cpumask_of(cpu));
  5045. /*
  5046. * We're having a chicken and egg problem, even though we are
  5047. * holding rq->lock, the cpu isn't yet set to this cpu so the
  5048. * lockdep check in task_group() will fail.
  5049. *
5050. * Similar case to sched_fork(); alternatively we could
  5051. * use task_rq_lock() here and obtain the other rq->lock.
  5052. *
  5053. * Silence PROVE_RCU
  5054. */
  5055. rcu_read_lock();
  5056. __set_task_cpu(idle, cpu);
  5057. rcu_read_unlock();
  5058. rq->curr = rq->idle = idle;
  5059. #if defined(CONFIG_SMP)
  5060. idle->on_cpu = 1;
  5061. #endif
  5062. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5063. /* Set the preempt count _outside_ the spinlocks! */
  5064. task_thread_info(idle)->preempt_count = 0;
  5065. /*
  5066. * The idle tasks have their own, simple scheduling class:
  5067. */
  5068. idle->sched_class = &idle_sched_class;
  5069. ftrace_graph_init_idle_task(idle, cpu);
  5070. }
  5071. /*
  5072. * In a system that switches off the HZ timer nohz_cpu_mask
  5073. * indicates which cpus entered this state. This is used
5074. * in the rcu update to wait only for active cpus. For systems
5075. * which do not switch off the HZ timer, nohz_cpu_mask should
  5076. * always be CPU_BITS_NONE.
  5077. */
  5078. cpumask_var_t nohz_cpu_mask;
  5079. /*
  5080. * Increase the granularity value when there are more CPUs,
  5081. * because with more CPUs the 'effective latency' as visible
  5082. * to users decreases. But the relationship is not linear,
  5083. * so pick a second-best guess by going with the log2 of the
  5084. * number of CPUs.
  5085. *
  5086. * This idea comes from the SD scheduler of Con Kolivas:
  5087. */
  5088. static int get_update_sysctl_factor(void)
  5089. {
  5090. unsigned int cpus = min_t(int, num_online_cpus(), 8);
  5091. unsigned int factor;
  5092. switch (sysctl_sched_tunable_scaling) {
  5093. case SCHED_TUNABLESCALING_NONE:
  5094. factor = 1;
  5095. break;
  5096. case SCHED_TUNABLESCALING_LINEAR:
  5097. factor = cpus;
  5098. break;
  5099. case SCHED_TUNABLESCALING_LOG:
  5100. default:
  5101. factor = 1 + ilog2(cpus);
  5102. break;
  5103. }
  5104. return factor;
  5105. }
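/*
 * Worked values for the factor computed above, with cpus clamped to 8
 * (factor = 1 + ilog2(cpus) in the default LOG mode):
 *
 *	1 CPU      -> factor 1
 *	2-3 CPUs   -> factor 2
 *	4-7 CPUs   -> factor 3
 *	8+ CPUs    -> factor 4
 *
 * update_sysctl() below then multiplies the normalized granularity/latency
 * tunables by this factor.
 */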
  5106. static void update_sysctl(void)
  5107. {
  5108. unsigned int factor = get_update_sysctl_factor();
  5109. #define SET_SYSCTL(name) \
  5110. (sysctl_##name = (factor) * normalized_sysctl_##name)
  5111. SET_SYSCTL(sched_min_granularity);
  5112. SET_SYSCTL(sched_latency);
  5113. SET_SYSCTL(sched_wakeup_granularity);
  5114. #undef SET_SYSCTL
  5115. }
  5116. static inline void sched_init_granularity(void)
  5117. {
  5118. update_sysctl();
  5119. }
  5120. #ifdef CONFIG_SMP
  5121. void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  5122. {
  5123. if (p->sched_class && p->sched_class->set_cpus_allowed)
  5124. p->sched_class->set_cpus_allowed(p, new_mask);
  5125. else {
  5126. cpumask_copy(&p->cpus_allowed, new_mask);
  5127. p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
  5128. }
  5129. }
  5130. /*
  5131. * This is how migration works:
  5132. *
  5133. * 1) we invoke migration_cpu_stop() on the target CPU using
  5134. * stop_one_cpu().
  5135. * 2) stopper starts to run (implicitly forcing the migrated thread
  5136. * off the CPU)
  5137. * 3) it checks whether the migrated task is still in the wrong runqueue.
  5138. * 4) if it's in the wrong runqueue then the migration thread removes
  5139. * it and puts it into the right queue.
  5140. * 5) stopper completes and stop_one_cpu() returns and the migration
  5141. * is done.
  5142. */
  5143. /*
  5144. * Change a given task's CPU affinity. Migrate the thread to a
  5145. * proper CPU and schedule it away if the CPU it's executing on
  5146. * is removed from the allowed bitmask.
  5147. *
  5148. * NOTE: the caller must have a valid reference to the task, the
  5149. * task must not exit() & deallocate itself prematurely. The
  5150. * call is not atomic; no spinlocks may be held.
  5151. */
  5152. int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
  5153. {
  5154. unsigned long flags;
  5155. struct rq *rq;
  5156. unsigned int dest_cpu;
  5157. int ret = 0;
  5158. rq = task_rq_lock(p, &flags);
  5159. if (cpumask_equal(&p->cpus_allowed, new_mask))
  5160. goto out;
  5161. if (!cpumask_intersects(new_mask, cpu_active_mask)) {
  5162. ret = -EINVAL;
  5163. goto out;
  5164. }
  5165. if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
  5166. ret = -EINVAL;
  5167. goto out;
  5168. }
  5169. do_set_cpus_allowed(p, new_mask);
  5170. /* Can the task run on the task's current CPU? If so, we're done */
  5171. if (cpumask_test_cpu(task_cpu(p), new_mask))
  5172. goto out;
  5173. dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
  5174. if (p->on_rq) {
  5175. struct migration_arg arg = { p, dest_cpu };
  5176. /* Need help from migration thread: drop lock and wait. */
  5177. task_rq_unlock(rq, p, &flags);
  5178. stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
  5179. tlb_migrate_finish(p->mm);
  5180. return 0;
  5181. }
  5182. out:
  5183. task_rq_unlock(rq, p, &flags);
  5184. return ret;
  5185. }
  5186. EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
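/*
 * Illustrative in-kernel use of set_cpus_allowed_ptr() to pin a
 * (hypothetical) kthread to one CPU; error handling kept minimal.
 *
 *	struct task_struct *tsk = kthread_create(my_fn, NULL, "my_kthread");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(3));	// run only on CPU 3
 *		wake_up_process(tsk);
 *	}
 */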
  5187. /*
5188. * Move a (non-current) task off this cpu, onto dest cpu. We're doing
  5189. * this because either it can't run here any more (set_cpus_allowed()
  5190. * away from this CPU, or CPU going down), or because we're
  5191. * attempting to rebalance this task on exec (sched_exec).
  5192. *
  5193. * So we race with normal scheduler movements, but that's OK, as long
  5194. * as the task is no longer on this CPU.
  5195. *
  5196. * Returns non-zero if task was successfully migrated.
  5197. */
  5198. static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  5199. {
  5200. struct rq *rq_dest, *rq_src;
  5201. int ret = 0;
  5202. if (unlikely(!cpu_active(dest_cpu)))
  5203. return ret;
  5204. rq_src = cpu_rq(src_cpu);
  5205. rq_dest = cpu_rq(dest_cpu);
  5206. raw_spin_lock(&p->pi_lock);
  5207. double_rq_lock(rq_src, rq_dest);
  5208. /* Already moved. */
  5209. if (task_cpu(p) != src_cpu)
  5210. goto done;
  5211. /* Affinity changed (again). */
  5212. if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
  5213. goto fail;
  5214. /*
  5215. * If we're not on a rq, the next wake-up will ensure we're
  5216. * placed properly.
  5217. */
  5218. if (p->on_rq) {
  5219. deactivate_task(rq_src, p, 0);
  5220. set_task_cpu(p, dest_cpu);
  5221. activate_task(rq_dest, p, 0);
  5222. check_preempt_curr(rq_dest, p, 0);
  5223. }
  5224. done:
  5225. ret = 1;
  5226. fail:
  5227. double_rq_unlock(rq_src, rq_dest);
  5228. raw_spin_unlock(&p->pi_lock);
  5229. return ret;
  5230. }
  5231. /*
  5232. * migration_cpu_stop - this will be executed by a highprio stopper thread
  5233. * and performs thread migration by bumping thread off CPU then
  5234. * 'pushing' onto another runqueue.
  5235. */
  5236. static int migration_cpu_stop(void *data)
  5237. {
  5238. struct migration_arg *arg = data;
  5239. /*
  5240. * The original target cpu might have gone down and we might
  5241. * be on another cpu but it doesn't matter.
  5242. */
  5243. local_irq_disable();
  5244. __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
  5245. local_irq_enable();
  5246. return 0;
  5247. }
  5248. #ifdef CONFIG_HOTPLUG_CPU
  5249. /*
  5250. * Ensures that the idle task is using init_mm right before its cpu goes
  5251. * offline.
  5252. */
  5253. void idle_task_exit(void)
  5254. {
  5255. struct mm_struct *mm = current->active_mm;
  5256. BUG_ON(cpu_online(smp_processor_id()));
  5257. if (mm != &init_mm)
  5258. switch_mm(mm, &init_mm, current);
  5259. mmdrop(mm);
  5260. }
  5261. /*
  5262. * While a dead CPU has no uninterruptible tasks queued at this point,
  5263. * it might still have a nonzero ->nr_uninterruptible counter, because
5264. * for performance reasons the counter is not strictly tracking tasks to
  5265. * their home CPUs. So we just add the counter to another CPU's counter,
  5266. * to keep the global sum constant after CPU-down:
  5267. */
  5268. static void migrate_nr_uninterruptible(struct rq *rq_src)
  5269. {
  5270. struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
  5271. rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
  5272. rq_src->nr_uninterruptible = 0;
  5273. }
  5274. /*
  5275. * remove the tasks which were accounted by rq from calc_load_tasks.
  5276. */
  5277. static void calc_global_load_remove(struct rq *rq)
  5278. {
  5279. atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
  5280. rq->calc_load_active = 0;
  5281. }
  5282. /*
  5283. * Migrate all tasks from the rq, sleeping tasks will be migrated by
  5284. * try_to_wake_up()->select_task_rq().
  5285. *
5286. * Called with rq->lock held even though we're in stop_machine() and
  5287. * there's no concurrency possible, we hold the required locks anyway
  5288. * because of lock validation efforts.
  5289. */
  5290. static void migrate_tasks(unsigned int dead_cpu)
  5291. {
  5292. struct rq *rq = cpu_rq(dead_cpu);
  5293. struct task_struct *next, *stop = rq->stop;
  5294. int dest_cpu;
  5295. /*
  5296. * Fudge the rq selection such that the below task selection loop
  5297. * doesn't get stuck on the currently eligible stop task.
  5298. *
  5299. * We're currently inside stop_machine() and the rq is either stuck
  5300. * in the stop_machine_cpu_stop() loop, or we're executing this code,
  5301. * either way we should never end up calling schedule() until we're
  5302. * done here.
  5303. */
  5304. rq->stop = NULL;
  5305. for ( ; ; ) {
  5306. /*
  5307. * There's this thread running, bail when that's the only
  5308. * remaining thread.
  5309. */
  5310. if (rq->nr_running == 1)
  5311. break;
  5312. next = pick_next_task(rq);
  5313. BUG_ON(!next);
  5314. next->sched_class->put_prev_task(rq, next);
  5315. /* Find suitable destination for @next, with force if needed. */
  5316. dest_cpu = select_fallback_rq(dead_cpu, next);
  5317. raw_spin_unlock(&rq->lock);
  5318. __migrate_task(next, dead_cpu, dest_cpu);
  5319. raw_spin_lock(&rq->lock);
  5320. }
  5321. rq->stop = stop;
  5322. }
  5323. #endif /* CONFIG_HOTPLUG_CPU */
  5324. #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
  5325. static struct ctl_table sd_ctl_dir[] = {
  5326. {
  5327. .procname = "sched_domain",
  5328. .mode = 0555,
  5329. },
  5330. {}
  5331. };
  5332. static struct ctl_table sd_ctl_root[] = {
  5333. {
  5334. .procname = "kernel",
  5335. .mode = 0555,
  5336. .child = sd_ctl_dir,
  5337. },
  5338. {}
  5339. };
  5340. static struct ctl_table *sd_alloc_ctl_entry(int n)
  5341. {
  5342. struct ctl_table *entry =
  5343. kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
  5344. return entry;
  5345. }
  5346. static void sd_free_ctl_entry(struct ctl_table **tablep)
  5347. {
  5348. struct ctl_table *entry;
  5349. /*
  5350. * In the intermediate directories, both the child directory and
  5351. * procname are dynamically allocated and could fail but the mode
  5352. * will always be set. In the lowest directory the names are
  5353. * static strings and all have proc handlers.
  5354. */
  5355. for (entry = *tablep; entry->mode; entry++) {
  5356. if (entry->child)
  5357. sd_free_ctl_entry(&entry->child);
  5358. if (entry->proc_handler == NULL)
  5359. kfree(entry->procname);
  5360. }
  5361. kfree(*tablep);
  5362. *tablep = NULL;
  5363. }
  5364. static void
  5365. set_table_entry(struct ctl_table *entry,
  5366. const char *procname, void *data, int maxlen,
  5367. mode_t mode, proc_handler *proc_handler)
  5368. {
  5369. entry->procname = procname;
  5370. entry->data = data;
  5371. entry->maxlen = maxlen;
  5372. entry->mode = mode;
  5373. entry->proc_handler = proc_handler;
  5374. }
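/*
 * Build the sysctl table exposing one sched_domain's tunables
 * (intervals, indices, busy factor, flags and name).
 */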
  5375. static struct ctl_table *
  5376. sd_alloc_ctl_domain_table(struct sched_domain *sd)
  5377. {
  5378. struct ctl_table *table = sd_alloc_ctl_entry(13);
  5379. if (table == NULL)
  5380. return NULL;
  5381. set_table_entry(&table[0], "min_interval", &sd->min_interval,
  5382. sizeof(long), 0644, proc_doulongvec_minmax);
  5383. set_table_entry(&table[1], "max_interval", &sd->max_interval,
  5384. sizeof(long), 0644, proc_doulongvec_minmax);
  5385. set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
  5386. sizeof(int), 0644, proc_dointvec_minmax);
  5387. set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
  5388. sizeof(int), 0644, proc_dointvec_minmax);
  5389. set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
  5390. sizeof(int), 0644, proc_dointvec_minmax);
  5391. set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
  5392. sizeof(int), 0644, proc_dointvec_minmax);
  5393. set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
  5394. sizeof(int), 0644, proc_dointvec_minmax);
  5395. set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
  5396. sizeof(int), 0644, proc_dointvec_minmax);
  5397. set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
  5398. sizeof(int), 0644, proc_dointvec_minmax);
  5399. set_table_entry(&table[9], "cache_nice_tries",
  5400. &sd->cache_nice_tries,
  5401. sizeof(int), 0644, proc_dointvec_minmax);
  5402. set_table_entry(&table[10], "flags", &sd->flags,
  5403. sizeof(int), 0644, proc_dointvec_minmax);
  5404. set_table_entry(&table[11], "name", sd->name,
  5405. CORENAME_MAX_SIZE, 0444, proc_dostring);
  5406. /* &table[12] is terminator */
  5407. return table;
  5408. }
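/*
 * Build the per-cpu sysctl directory: one "domain%d" child table for
 * every sched_domain attached to @cpu.
 */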
5409. static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
  5410. {
  5411. struct ctl_table *entry, *table;
  5412. struct sched_domain *sd;
  5413. int domain_num = 0, i;
  5414. char buf[32];
  5415. for_each_domain(cpu, sd)
  5416. domain_num++;
  5417. entry = table = sd_alloc_ctl_entry(domain_num + 1);
  5418. if (table == NULL)
  5419. return NULL;
  5420. i = 0;
  5421. for_each_domain(cpu, sd) {
  5422. snprintf(buf, 32, "domain%d", i);
  5423. entry->procname = kstrdup(buf, GFP_KERNEL);
  5424. entry->mode = 0555;
  5425. entry->child = sd_alloc_ctl_domain_table(sd);
  5426. entry++;
  5427. i++;
  5428. }
  5429. return table;
  5430. }
  5431. static struct ctl_table_header *sd_sysctl_header;
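/*
 * Populate the sched_domain sysctl tree for every possible cpu, e.g.
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/{min_interval,max_interval,...}
 *                                      /domain1/...
 *                                 cpu1/...
 *
 * and register it under the "kernel" root table.
 */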
  5432. static void register_sched_domain_sysctl(void)
  5433. {
  5434. int i, cpu_num = num_possible_cpus();
  5435. struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
  5436. char buf[32];
  5437. WARN_ON(sd_ctl_dir[0].child);
  5438. sd_ctl_dir[0].child = entry;
  5439. if (entry == NULL)
  5440. return;
  5441. for_each_possible_cpu(i) {
  5442. snprintf(buf, 32, "cpu%d", i);
  5443. entry->procname = kstrdup(buf, GFP_KERNEL);
  5444. entry->mode = 0555;
  5445. entry->child = sd_alloc_ctl_cpu_table(i);
  5446. entry++;
  5447. }
  5448. WARN_ON(sd_sysctl_header);
  5449. sd_sysctl_header = register_sysctl_table(sd_ctl_root);
  5450. }
  5451. /* may be called multiple times per register */
  5452. static void unregister_sched_domain_sysctl(void)
  5453. {
  5454. if (sd_sysctl_header)
  5455. unregister_sysctl_table(sd_sysctl_header);
  5456. sd_sysctl_header = NULL;
  5457. if (sd_ctl_dir[0].child)
  5458. sd_free_ctl_entry(&sd_ctl_dir[0].child);
  5459. }
  5460. #else
  5461. static void register_sched_domain_sysctl(void)
  5462. {
  5463. }
  5464. static void unregister_sched_domain_sysctl(void)
  5465. {
  5466. }
  5467. #endif
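/*
 * Mark a runqueue online/offline in its root domain and let each
 * scheduling class update its own state via rq_online()/rq_offline().
 */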
  5468. static void set_rq_online(struct rq *rq)
  5469. {
  5470. if (!rq->online) {
  5471. const struct sched_class *class;
  5472. cpumask_set_cpu(rq->cpu, rq->rd->online);
  5473. rq->online = 1;
  5474. for_each_class(class) {
  5475. if (class->rq_online)
  5476. class->rq_online(rq);
  5477. }
  5478. }
  5479. }
  5480. static void set_rq_offline(struct rq *rq)
  5481. {
  5482. if (rq->online) {
  5483. const struct sched_class *class;
  5484. for_each_class(class) {
  5485. if (class->rq_offline)
  5486. class->rq_offline(rq);
  5487. }
  5488. cpumask_clear_cpu(rq->cpu, rq->rd->online);
  5489. rq->online = 0;
  5490. }
  5491. }
  5492. /*
5493. * migration_call - callback that gets triggered when a CPU is added or
5494. * removed. Here we update the runqueue state needed for the new or dying CPU.
  5495. */
  5496. static int __cpuinit
  5497. migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  5498. {
  5499. int cpu = (long)hcpu;
  5500. unsigned long flags;
  5501. struct rq *rq = cpu_rq(cpu);
  5502. switch (action & ~CPU_TASKS_FROZEN) {
  5503. case CPU_UP_PREPARE:
  5504. rq->calc_load_update = calc_load_update;
  5505. break;
  5506. case CPU_ONLINE:
  5507. /* Update our root-domain */
  5508. raw_spin_lock_irqsave(&rq->lock, flags);
  5509. if (rq->rd) {
  5510. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  5511. set_rq_online(rq);
  5512. }
  5513. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5514. break;
  5515. #ifdef CONFIG_HOTPLUG_CPU
  5516. case CPU_DYING:
  5517. sched_ttwu_pending();
  5518. /* Update our root-domain */
  5519. raw_spin_lock_irqsave(&rq->lock, flags);
  5520. if (rq->rd) {
  5521. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  5522. set_rq_offline(rq);
  5523. }
  5524. migrate_tasks(cpu);
  5525. BUG_ON(rq->nr_running != 1); /* the migration thread */
  5526. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5527. migrate_nr_uninterruptible(rq);
  5528. calc_global_load_remove(rq);
  5529. break;
  5530. #endif
  5531. }
  5532. update_max_interval();
  5533. return NOTIFY_OK;
  5534. }
  5535. /*
5536. * Register at high priority so that task migration (migrate_tasks)
  5537. * happens before everything else. This has to be lower priority than
  5538. * the notifier in the perf_event subsystem, though.
  5539. */
  5540. static struct notifier_block __cpuinitdata migration_notifier = {
  5541. .notifier_call = migration_call,
  5542. .priority = CPU_PRI_MIGRATION,
  5543. };
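/*
 * Notifiers that keep cpu_active in sync with hotplug: a cpu becomes
 * active once it is fully online (or a failed down is rolled back) and
 * inactive as soon as CPU_DOWN_PREPARE is seen.
 */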
  5544. static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
  5545. unsigned long action, void *hcpu)
  5546. {
  5547. switch (action & ~CPU_TASKS_FROZEN) {
  5548. case CPU_ONLINE:
  5549. case CPU_DOWN_FAILED:
  5550. set_cpu_active((long)hcpu, true);
  5551. return NOTIFY_OK;
  5552. default:
  5553. return NOTIFY_DONE;
  5554. }
  5555. }
  5556. static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
  5557. unsigned long action, void *hcpu)
  5558. {
  5559. switch (action & ~CPU_TASKS_FROZEN) {
  5560. case CPU_DOWN_PREPARE:
  5561. set_cpu_active((long)hcpu, false);
  5562. return NOTIFY_OK;
  5563. default:
  5564. return NOTIFY_DONE;
  5565. }
  5566. }
  5567. static int __init migration_init(void)
  5568. {
  5569. void *cpu = (void *)(long)smp_processor_id();
  5570. int err;
  5571. /* Initialize migration for the boot CPU */
  5572. err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
  5573. BUG_ON(err == NOTIFY_BAD);
  5574. migration_call(&migration_notifier, CPU_ONLINE, cpu);
  5575. register_cpu_notifier(&migration_notifier);
  5576. /* Register cpu active notifiers */
  5577. cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
  5578. cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
  5579. return 0;
  5580. }
  5581. early_initcall(migration_init);
  5582. #endif
  5583. #ifdef CONFIG_SMP
  5584. static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
  5585. #ifdef CONFIG_SCHED_DEBUG
  5586. static __read_mostly int sched_domain_debug_enabled;
  5587. static int __init sched_domain_debug_setup(char *str)
  5588. {
  5589. sched_domain_debug_enabled = 1;
  5590. return 0;
  5591. }
  5592. early_param("sched_debug", sched_domain_debug_setup);
  5593. static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
  5594. struct cpumask *groupmask)
  5595. {
  5596. struct sched_group *group = sd->groups;
  5597. char str[256];
  5598. cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
  5599. cpumask_clear(groupmask);
  5600. printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
  5601. if (!(sd->flags & SD_LOAD_BALANCE)) {
  5602. printk("does not load-balance\n");
  5603. if (sd->parent)
  5604. printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
  5605. " has parent");
  5606. return -1;
  5607. }
  5608. printk(KERN_CONT "span %s level %s\n", str, sd->name);
  5609. if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
  5610. printk(KERN_ERR "ERROR: domain->span does not contain "
  5611. "CPU%d\n", cpu);
  5612. }
  5613. if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
  5614. printk(KERN_ERR "ERROR: domain->groups does not contain"
  5615. " CPU%d\n", cpu);
  5616. }
  5617. printk(KERN_DEBUG "%*s groups:", level + 1, "");
  5618. do {
  5619. if (!group) {
  5620. printk("\n");
  5621. printk(KERN_ERR "ERROR: group is NULL\n");
  5622. break;
  5623. }
  5624. if (!group->sgp->power) {
  5625. printk(KERN_CONT "\n");
  5626. printk(KERN_ERR "ERROR: domain->cpu_power not "
  5627. "set\n");
  5628. break;
  5629. }
  5630. if (!cpumask_weight(sched_group_cpus(group))) {
  5631. printk(KERN_CONT "\n");
  5632. printk(KERN_ERR "ERROR: empty group\n");
  5633. break;
  5634. }
  5635. if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
  5636. printk(KERN_CONT "\n");
  5637. printk(KERN_ERR "ERROR: repeated CPUs\n");
  5638. break;
  5639. }
  5640. cpumask_or(groupmask, groupmask, sched_group_cpus(group));
  5641. cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
  5642. printk(KERN_CONT " %s", str);
  5643. if (group->sgp->power != SCHED_POWER_SCALE) {
  5644. printk(KERN_CONT " (cpu_power = %d)",
  5645. group->sgp->power);
  5646. }
  5647. group = group->next;
  5648. } while (group != sd->groups);
  5649. printk(KERN_CONT "\n");
  5650. if (!cpumask_equal(sched_domain_span(sd), groupmask))
  5651. printk(KERN_ERR "ERROR: groups don't span domain->span\n");
  5652. if (sd->parent &&
  5653. !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
  5654. printk(KERN_ERR "ERROR: parent span is not a superset "
  5655. "of domain->span\n");
  5656. return 0;
  5657. }
  5658. static void sched_domain_debug(struct sched_domain *sd, int cpu)
  5659. {
  5660. int level = 0;
  5661. if (!sched_domain_debug_enabled)
  5662. return;
  5663. if (!sd) {
  5664. printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
  5665. return;
  5666. }
  5667. printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
  5668. for (;;) {
  5669. if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
  5670. break;
  5671. level++;
  5672. sd = sd->parent;
  5673. if (!sd)
  5674. break;
  5675. }
  5676. }
  5677. #else /* !CONFIG_SCHED_DEBUG */
  5678. # define sched_domain_debug(sd, cpu) do { } while (0)
  5679. #endif /* CONFIG_SCHED_DEBUG */
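/*
 * A domain is degenerate (and can be dropped) when it spans a single
 * cpu, or when none of the flags it carries actually need it: the
 * balancing flags require at least two groups and SD_WAKE_AFFINE is
 * not set.
 */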
  5680. static int sd_degenerate(struct sched_domain *sd)
  5681. {
  5682. if (cpumask_weight(sched_domain_span(sd)) == 1)
  5683. return 1;
  5684. /* Following flags need at least 2 groups */
  5685. if (sd->flags & (SD_LOAD_BALANCE |
  5686. SD_BALANCE_NEWIDLE |
  5687. SD_BALANCE_FORK |
  5688. SD_BALANCE_EXEC |
  5689. SD_SHARE_CPUPOWER |
  5690. SD_SHARE_PKG_RESOURCES)) {
  5691. if (sd->groups != sd->groups->next)
  5692. return 0;
  5693. }
  5694. /* Following flags don't use groups */
  5695. if (sd->flags & (SD_WAKE_AFFINE))
  5696. return 0;
  5697. return 1;
  5698. }
  5699. static int
  5700. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  5701. {
  5702. unsigned long cflags = sd->flags, pflags = parent->flags;
  5703. if (sd_degenerate(parent))
  5704. return 1;
  5705. if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
  5706. return 0;
  5707. /* Flags needing groups don't count if only 1 group in parent */
  5708. if (parent->groups == parent->groups->next) {
  5709. pflags &= ~(SD_LOAD_BALANCE |
  5710. SD_BALANCE_NEWIDLE |
  5711. SD_BALANCE_FORK |
  5712. SD_BALANCE_EXEC |
  5713. SD_SHARE_CPUPOWER |
  5714. SD_SHARE_PKG_RESOURCES);
  5715. if (nr_node_ids == 1)
  5716. pflags &= ~SD_SERIALIZE;
  5717. }
  5718. if (~cflags & pflags)
  5719. return 0;
  5720. return 1;
  5721. }
  5722. static void free_rootdomain(struct rcu_head *rcu)
  5723. {
  5724. struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
  5725. cpupri_cleanup(&rd->cpupri);
  5726. free_cpumask_var(rd->rto_mask);
  5727. free_cpumask_var(rd->online);
  5728. free_cpumask_var(rd->span);
  5729. kfree(rd);
  5730. }
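/*
 * Attach @rq to the root domain @rd, detaching it from its previous
 * root domain first; the old root domain is freed via RCU once its
 * refcount drops to zero.
 */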
  5731. static void rq_attach_root(struct rq *rq, struct root_domain *rd)
  5732. {
  5733. struct root_domain *old_rd = NULL;
  5734. unsigned long flags;
  5735. raw_spin_lock_irqsave(&rq->lock, flags);
  5736. if (rq->rd) {
  5737. old_rd = rq->rd;
  5738. if (cpumask_test_cpu(rq->cpu, old_rd->online))
  5739. set_rq_offline(rq);
  5740. cpumask_clear_cpu(rq->cpu, old_rd->span);
  5741. /*
5742. * If we don't want to free the old_rd yet then
  5743. * set old_rd to NULL to skip the freeing later
  5744. * in this function:
  5745. */
  5746. if (!atomic_dec_and_test(&old_rd->refcount))
  5747. old_rd = NULL;
  5748. }
  5749. atomic_inc(&rd->refcount);
  5750. rq->rd = rd;
  5751. cpumask_set_cpu(rq->cpu, rd->span);
  5752. if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
  5753. set_rq_online(rq);
  5754. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5755. if (old_rd)
  5756. call_rcu_sched(&old_rd->rcu, free_rootdomain);
  5757. }
  5758. static int init_rootdomain(struct root_domain *rd)
  5759. {
  5760. memset(rd, 0, sizeof(*rd));
  5761. if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
  5762. goto out;
  5763. if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
  5764. goto free_span;
  5765. if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
  5766. goto free_online;
  5767. if (cpupri_init(&rd->cpupri) != 0)
  5768. goto free_rto_mask;
  5769. return 0;
  5770. free_rto_mask:
  5771. free_cpumask_var(rd->rto_mask);
  5772. free_online:
  5773. free_cpumask_var(rd->online);
  5774. free_span:
  5775. free_cpumask_var(rd->span);
  5776. out:
  5777. return -ENOMEM;
  5778. }
  5779. static void init_defrootdomain(void)
  5780. {
  5781. init_rootdomain(&def_root_domain);
  5782. atomic_set(&def_root_domain.refcount, 1);
  5783. }
  5784. static struct root_domain *alloc_rootdomain(void)
  5785. {
  5786. struct root_domain *rd;
  5787. rd = kmalloc(sizeof(*rd), GFP_KERNEL);
  5788. if (!rd)
  5789. return NULL;
  5790. if (init_rootdomain(rd) != 0) {
  5791. kfree(rd);
  5792. return NULL;
  5793. }
  5794. return rd;
  5795. }
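/*
 * Walk the circular group list and free every group; the shared
 * sched_group_power is freed only when @free_sgp is set and its
 * refcount drops to zero.
 */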
  5796. static void free_sched_groups(struct sched_group *sg, int free_sgp)
  5797. {
  5798. struct sched_group *tmp, *first;
  5799. if (!sg)
  5800. return;
  5801. first = sg;
  5802. do {
  5803. tmp = sg->next;
  5804. if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
  5805. kfree(sg->sgp);
  5806. kfree(sg);
  5807. sg = tmp;
  5808. } while (sg != first);
  5809. }
  5810. static void free_sched_domain(struct rcu_head *rcu)
  5811. {
  5812. struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
  5813. /*
5814. * If it's an overlapping domain it has private groups; iterate and
  5815. * nuke them all.
  5816. */
  5817. if (sd->flags & SD_OVERLAP) {
  5818. free_sched_groups(sd->groups, 1);
  5819. } else if (atomic_dec_and_test(&sd->groups->ref)) {
  5820. kfree(sd->groups->sgp);
  5821. kfree(sd->groups);
  5822. }
  5823. kfree(sd);
  5824. }
  5825. static void destroy_sched_domain(struct sched_domain *sd, int cpu)
  5826. {
  5827. call_rcu(&sd->rcu, free_sched_domain);
  5828. }
  5829. static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  5830. {
  5831. for (; sd; sd = sd->parent)
  5832. destroy_sched_domain(sd, cpu);
  5833. }
  5834. /*
  5835. * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  5836. * hold the hotplug lock.
  5837. */
  5838. static void
  5839. cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
  5840. {
  5841. struct rq *rq = cpu_rq(cpu);
  5842. struct sched_domain *tmp;
  5843. /* Remove the sched domains which do not contribute to scheduling. */
  5844. for (tmp = sd; tmp; ) {
  5845. struct sched_domain *parent = tmp->parent;
  5846. if (!parent)
  5847. break;
  5848. if (sd_parent_degenerate(tmp, parent)) {
  5849. tmp->parent = parent->parent;
  5850. if (parent->parent)
  5851. parent->parent->child = tmp;
  5852. destroy_sched_domain(parent, cpu);
  5853. } else
  5854. tmp = tmp->parent;
  5855. }
  5856. if (sd && sd_degenerate(sd)) {
  5857. tmp = sd;
  5858. sd = sd->parent;
  5859. destroy_sched_domain(tmp, cpu);
  5860. if (sd)
  5861. sd->child = NULL;
  5862. }
  5863. sched_domain_debug(sd, cpu);
  5864. rq_attach_root(rq, rd);
  5865. tmp = rq->sd;
  5866. rcu_assign_pointer(rq->sd, sd);
  5867. destroy_sched_domains(tmp, cpu);
  5868. }
  5869. /* cpus with isolated domains */
  5870. static cpumask_var_t cpu_isolated_map;
  5871. /* Setup the mask of cpus configured for isolated domains */
  5872. static int __init isolated_cpu_setup(char *str)
  5873. {
  5874. alloc_bootmem_cpumask_var(&cpu_isolated_map);
  5875. cpulist_parse(str, cpu_isolated_map);
  5876. return 1;
  5877. }
  5878. __setup("isolcpus=", isolated_cpu_setup);
  5879. #define SD_NODES_PER_DOMAIN 16
  5880. #ifdef CONFIG_NUMA
  5881. /**
  5882. * find_next_best_node - find the next node to include in a sched_domain
  5883. * @node: node whose sched_domain we're building
  5884. * @used_nodes: nodes already in the sched_domain
  5885. *
  5886. * Find the next node to include in a given scheduling domain. Simply
  5887. * finds the closest node not already in the @used_nodes map.
  5888. *
  5889. * Should use nodemask_t.
  5890. */
  5891. static int find_next_best_node(int node, nodemask_t *used_nodes)
  5892. {
  5893. int i, n, val, min_val, best_node = -1;
  5894. min_val = INT_MAX;
  5895. for (i = 0; i < nr_node_ids; i++) {
  5896. /* Start at @node */
  5897. n = (node + i) % nr_node_ids;
  5898. if (!nr_cpus_node(n))
  5899. continue;
  5900. /* Skip already used nodes */
  5901. if (node_isset(n, *used_nodes))
  5902. continue;
  5903. /* Simple min distance search */
  5904. val = node_distance(node, n);
  5905. if (val < min_val) {
  5906. min_val = val;
  5907. best_node = n;
  5908. }
  5909. }
  5910. if (best_node != -1)
  5911. node_set(best_node, *used_nodes);
  5912. return best_node;
  5913. }
  5914. /**
  5915. * sched_domain_node_span - get a cpumask for a node's sched_domain
  5916. * @node: node whose cpumask we're constructing
  5917. * @span: resulting cpumask
  5918. *
  5919. * Given a node, construct a good cpumask for its sched_domain to span. It
  5920. * should be one that prevents unnecessary balancing, but also spreads tasks
  5921. * out optimally.
  5922. */
  5923. static void sched_domain_node_span(int node, struct cpumask *span)
  5924. {
  5925. nodemask_t used_nodes;
  5926. int i;
  5927. cpumask_clear(span);
  5928. nodes_clear(used_nodes);
  5929. cpumask_or(span, span, cpumask_of_node(node));
  5930. node_set(node, used_nodes);
  5931. for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
  5932. int next_node = find_next_best_node(node, &used_nodes);
  5933. if (next_node < 0)
  5934. break;
  5935. cpumask_or(span, span, cpumask_of_node(next_node));
  5936. }
  5937. }
  5938. static const struct cpumask *cpu_node_mask(int cpu)
  5939. {
  5940. lockdep_assert_held(&sched_domains_mutex);
  5941. sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
  5942. return sched_domains_tmpmask;
  5943. }
  5944. static const struct cpumask *cpu_allnodes_mask(int cpu)
  5945. {
  5946. return cpu_possible_mask;
  5947. }
  5948. #endif /* CONFIG_NUMA */
  5949. static const struct cpumask *cpu_cpu_mask(int cpu)
  5950. {
  5951. return cpumask_of_node(cpu_to_node(cpu));
  5952. }
  5953. int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  5954. struct sd_data {
  5955. struct sched_domain **__percpu sd;
  5956. struct sched_group **__percpu sg;
  5957. struct sched_group_power **__percpu sgp;
  5958. };
  5959. struct s_data {
  5960. struct sched_domain ** __percpu sd;
  5961. struct root_domain *rd;
  5962. };
  5963. enum s_alloc {
  5964. sa_rootdomain,
  5965. sa_sd,
  5966. sa_sd_storage,
  5967. sa_none,
  5968. };
  5969. struct sched_domain_topology_level;
  5970. typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
  5971. typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
  5972. #define SDTL_OVERLAP 0x01
  5973. struct sched_domain_topology_level {
  5974. sched_domain_init_f init;
  5975. sched_domain_mask_f mask;
  5976. int flags;
  5977. struct sd_data data;
  5978. };
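/*
 * Build the group list for an SD_OVERLAP domain: each group covers the
 * span of one member cpu's child domain and uses private groups rather
 * than the shared sd_data ones.
 */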
  5979. static int
  5980. build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  5981. {
  5982. struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
  5983. const struct cpumask *span = sched_domain_span(sd);
  5984. struct cpumask *covered = sched_domains_tmpmask;
  5985. struct sd_data *sdd = sd->private;
  5986. struct sched_domain *child;
  5987. int i;
  5988. cpumask_clear(covered);
  5989. for_each_cpu(i, span) {
  5990. struct cpumask *sg_span;
  5991. if (cpumask_test_cpu(i, covered))
  5992. continue;
  5993. sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
  5994. GFP_KERNEL, cpu_to_node(i));
  5995. if (!sg)
  5996. goto fail;
  5997. sg_span = sched_group_cpus(sg);
  5998. child = *per_cpu_ptr(sdd->sd, i);
  5999. if (child->child) {
  6000. child = child->child;
  6001. cpumask_copy(sg_span, sched_domain_span(child));
  6002. } else
  6003. cpumask_set_cpu(i, sg_span);
  6004. cpumask_or(covered, covered, sg_span);
  6005. sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
  6006. atomic_inc(&sg->sgp->ref);
  6007. if (cpumask_test_cpu(cpu, sg_span))
  6008. groups = sg;
  6009. if (!first)
  6010. first = sg;
  6011. if (last)
  6012. last->next = sg;
  6013. last = sg;
  6014. last->next = first;
  6015. }
  6016. sd->groups = groups;
  6017. return 0;
  6018. fail:
  6019. free_sched_groups(first, 0);
  6020. return -ENOMEM;
  6021. }
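/*
 * Return the group number for @cpu (the first cpu of its child
 * domain's span) and, if @sg is given, hand back the shared
 * sched_group and sched_group_power for that group.
 */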
  6022. static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
  6023. {
  6024. struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
  6025. struct sched_domain *child = sd->child;
  6026. if (child)
  6027. cpu = cpumask_first(sched_domain_span(child));
  6028. if (sg) {
  6029. *sg = *per_cpu_ptr(sdd->sg, cpu);
  6030. (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
  6031. atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
  6032. }
  6033. return cpu;
  6034. }
  6035. /*
  6036. * build_sched_groups will build a circular linked list of the groups
6037. * covered by the given span, set each group's ->cpumask correctly,
6038. * and initialize each group's ->cpu_power to 0.
  6039. *
  6040. * Assumes the sched_domain tree is fully constructed
  6041. */
  6042. static int
  6043. build_sched_groups(struct sched_domain *sd, int cpu)
  6044. {
  6045. struct sched_group *first = NULL, *last = NULL;
  6046. struct sd_data *sdd = sd->private;
  6047. const struct cpumask *span = sched_domain_span(sd);
  6048. struct cpumask *covered;
  6049. int i;
  6050. get_group(cpu, sdd, &sd->groups);
  6051. atomic_inc(&sd->groups->ref);
  6052. if (cpu != cpumask_first(sched_domain_span(sd)))
  6053. return 0;
  6054. lockdep_assert_held(&sched_domains_mutex);
  6055. covered = sched_domains_tmpmask;
  6056. cpumask_clear(covered);
  6057. for_each_cpu(i, span) {
  6058. struct sched_group *sg;
  6059. int group = get_group(i, sdd, &sg);
  6060. int j;
  6061. if (cpumask_test_cpu(i, covered))
  6062. continue;
  6063. cpumask_clear(sched_group_cpus(sg));
  6064. sg->sgp->power = 0;
  6065. for_each_cpu(j, span) {
  6066. if (get_group(j, sdd, NULL) != group)
  6067. continue;
  6068. cpumask_set_cpu(j, covered);
  6069. cpumask_set_cpu(j, sched_group_cpus(sg));
  6070. }
  6071. if (!first)
  6072. first = sg;
  6073. if (last)
  6074. last->next = sg;
  6075. last = sg;
  6076. }
  6077. last->next = first;
  6078. return 0;
  6079. }
  6080. /*
  6081. * Initialize sched groups cpu_power.
  6082. *
6083. * cpu_power indicates the capacity of a sched group, which is used while
6084. * distributing the load between different sched groups in a sched domain.
6085. * Typically cpu_power for all the groups in a sched domain will be the same
6086. * unless there are asymmetries in the topology. If there are asymmetries, the
6087. * group having more cpu_power will pick up more load compared to the group
6088. * having less cpu_power.
  6089. */
  6090. static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  6091. {
  6092. struct sched_group *sg = sd->groups;
  6093. WARN_ON(!sd || !sg);
  6094. do {
  6095. sg->group_weight = cpumask_weight(sched_group_cpus(sg));
  6096. sg = sg->next;
  6097. } while (sg != sd->groups);
  6098. if (cpu != group_first_cpu(sg))
  6099. return;
  6100. update_group_power(sd, cpu);
  6101. }
  6102. /*
6103. * Initializers for sched domains
  6104. * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  6105. */
  6106. #ifdef CONFIG_SCHED_DEBUG
  6107. # define SD_INIT_NAME(sd, type) sd->name = #type
  6108. #else
  6109. # define SD_INIT_NAME(sd, type) do { } while (0)
  6110. #endif
  6111. #define SD_INIT_FUNC(type) \
  6112. static noinline struct sched_domain * \
  6113. sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
  6114. { \
  6115. struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
  6116. *sd = SD_##type##_INIT; \
  6117. SD_INIT_NAME(sd, type); \
  6118. sd->private = &tl->data; \
  6119. return sd; \
  6120. }
  6121. SD_INIT_FUNC(CPU)
  6122. #ifdef CONFIG_NUMA
  6123. SD_INIT_FUNC(ALLNODES)
  6124. SD_INIT_FUNC(NODE)
  6125. #endif
  6126. #ifdef CONFIG_SCHED_SMT
  6127. SD_INIT_FUNC(SIBLING)
  6128. #endif
  6129. #ifdef CONFIG_SCHED_MC
  6130. SD_INIT_FUNC(MC)
  6131. #endif
  6132. #ifdef CONFIG_SCHED_BOOK
  6133. SD_INIT_FUNC(BOOK)
  6134. #endif
  6135. static int default_relax_domain_level = -1;
  6136. int sched_domain_level_max;
  6137. static int __init setup_relax_domain_level(char *str)
  6138. {
  6139. unsigned long val;
  6140. val = simple_strtoul(str, NULL, 0);
  6141. if (val < sched_domain_level_max)
  6142. default_relax_domain_level = val;
  6143. return 1;
  6144. }
  6145. __setup("relax_domain_level=", setup_relax_domain_level);
  6146. static void set_domain_attribute(struct sched_domain *sd,
  6147. struct sched_domain_attr *attr)
  6148. {
  6149. int request;
  6150. if (!attr || attr->relax_domain_level < 0) {
  6151. if (default_relax_domain_level < 0)
  6152. return;
  6153. else
  6154. request = default_relax_domain_level;
  6155. } else
  6156. request = attr->relax_domain_level;
  6157. if (request < sd->level) {
  6158. /* turn off idle balance on this domain */
  6159. sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  6160. } else {
  6161. /* turn on idle balance on this domain */
  6162. sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  6163. }
  6164. }
  6165. static void __sdt_free(const struct cpumask *cpu_map);
  6166. static int __sdt_alloc(const struct cpumask *cpu_map);
  6167. static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
  6168. const struct cpumask *cpu_map)
  6169. {
  6170. switch (what) {
  6171. case sa_rootdomain:
  6172. if (!atomic_read(&d->rd->refcount))
  6173. free_rootdomain(&d->rd->rcu); /* fall through */
  6174. case sa_sd:
  6175. free_percpu(d->sd); /* fall through */
  6176. case sa_sd_storage:
  6177. __sdt_free(cpu_map); /* fall through */
  6178. case sa_none:
  6179. break;
  6180. }
  6181. }
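/*
 * Allocate everything build_sched_domains() needs, returning how far
 * the allocations got so __free_domain_allocs() can unwind on failure.
 */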
  6182. static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
  6183. const struct cpumask *cpu_map)
  6184. {
  6185. memset(d, 0, sizeof(*d));
  6186. if (__sdt_alloc(cpu_map))
  6187. return sa_sd_storage;
  6188. d->sd = alloc_percpu(struct sched_domain *);
  6189. if (!d->sd)
  6190. return sa_sd_storage;
  6191. d->rd = alloc_rootdomain();
  6192. if (!d->rd)
  6193. return sa_sd;
  6194. return sa_rootdomain;
  6195. }
  6196. /*
  6197. * NULL the sd_data elements we've used to build the sched_domain and
  6198. * sched_group structure so that the subsequent __free_domain_allocs()
  6199. * will not free the data we're using.
  6200. */
  6201. static void claim_allocations(int cpu, struct sched_domain *sd)
  6202. {
  6203. struct sd_data *sdd = sd->private;
  6204. WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
  6205. *per_cpu_ptr(sdd->sd, cpu) = NULL;
  6206. if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
  6207. *per_cpu_ptr(sdd->sg, cpu) = NULL;
  6208. if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
  6209. *per_cpu_ptr(sdd->sgp, cpu) = NULL;
  6210. }
  6211. #ifdef CONFIG_SCHED_SMT
  6212. static const struct cpumask *cpu_smt_mask(int cpu)
  6213. {
  6214. return topology_thread_cpumask(cpu);
  6215. }
  6216. #endif
  6217. /*
  6218. * Topology list, bottom-up.
  6219. */
  6220. static struct sched_domain_topology_level default_topology[] = {
  6221. #ifdef CONFIG_SCHED_SMT
  6222. { sd_init_SIBLING, cpu_smt_mask, },
  6223. #endif
  6224. #ifdef CONFIG_SCHED_MC
  6225. { sd_init_MC, cpu_coregroup_mask, },
  6226. #endif
  6227. #ifdef CONFIG_SCHED_BOOK
  6228. { sd_init_BOOK, cpu_book_mask, },
  6229. #endif
  6230. { sd_init_CPU, cpu_cpu_mask, },
  6231. #ifdef CONFIG_NUMA
  6232. { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
  6233. { sd_init_ALLNODES, cpu_allnodes_mask, },
  6234. #endif
  6235. { NULL, },
  6236. };
  6237. static struct sched_domain_topology_level *sched_domain_topology = default_topology;
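/*
 * Allocate per-cpu sched_domain, sched_group and sched_group_power
 * storage for every topology level; undone by __sdt_free().
 */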
  6238. static int __sdt_alloc(const struct cpumask *cpu_map)
  6239. {
  6240. struct sched_domain_topology_level *tl;
  6241. int j;
  6242. for (tl = sched_domain_topology; tl->init; tl++) {
  6243. struct sd_data *sdd = &tl->data;
  6244. sdd->sd = alloc_percpu(struct sched_domain *);
  6245. if (!sdd->sd)
  6246. return -ENOMEM;
  6247. sdd->sg = alloc_percpu(struct sched_group *);
  6248. if (!sdd->sg)
  6249. return -ENOMEM;
  6250. sdd->sgp = alloc_percpu(struct sched_group_power *);
  6251. if (!sdd->sgp)
  6252. return -ENOMEM;
  6253. for_each_cpu(j, cpu_map) {
  6254. struct sched_domain *sd;
  6255. struct sched_group *sg;
  6256. struct sched_group_power *sgp;
  6257. sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
  6258. GFP_KERNEL, cpu_to_node(j));
  6259. if (!sd)
  6260. return -ENOMEM;
  6261. *per_cpu_ptr(sdd->sd, j) = sd;
  6262. sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
  6263. GFP_KERNEL, cpu_to_node(j));
  6264. if (!sg)
  6265. return -ENOMEM;
  6266. *per_cpu_ptr(sdd->sg, j) = sg;
  6267. sgp = kzalloc_node(sizeof(struct sched_group_power),
  6268. GFP_KERNEL, cpu_to_node(j));
  6269. if (!sgp)
  6270. return -ENOMEM;
  6271. *per_cpu_ptr(sdd->sgp, j) = sgp;
  6272. }
  6273. }
  6274. return 0;
  6275. }
  6276. static void __sdt_free(const struct cpumask *cpu_map)
  6277. {
  6278. struct sched_domain_topology_level *tl;
  6279. int j;
  6280. for (tl = sched_domain_topology; tl->init; tl++) {
  6281. struct sd_data *sdd = &tl->data;
  6282. for_each_cpu(j, cpu_map) {
  6283. struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
  6284. if (sd && (sd->flags & SD_OVERLAP))
  6285. free_sched_groups(sd->groups, 0);
  6286. kfree(*per_cpu_ptr(sdd->sg, j));
  6287. kfree(*per_cpu_ptr(sdd->sgp, j));
  6288. }
  6289. free_percpu(sdd->sd);
  6290. free_percpu(sdd->sg);
  6291. free_percpu(sdd->sgp);
  6292. }
  6293. }
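/*
 * Instantiate one topology level for @cpu: initialize the domain,
 * apply the relax-level attribute, restrict its span to @cpu_map and
 * link it above @child.
 */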
  6294. struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
  6295. struct s_data *d, const struct cpumask *cpu_map,
  6296. struct sched_domain_attr *attr, struct sched_domain *child,
  6297. int cpu)
  6298. {
  6299. struct sched_domain *sd = tl->init(tl, cpu);
  6300. if (!sd)
  6301. return child;
  6302. set_domain_attribute(sd, attr);
  6303. cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
  6304. if (child) {
  6305. sd->level = child->level + 1;
  6306. sched_domain_level_max = max(sched_domain_level_max, sd->level);
  6307. child->parent = sd;
  6308. }
  6309. sd->child = child;
  6310. return sd;
  6311. }
  6312. /*
  6313. * Build sched domains for a given set of cpus and attach the sched domains
  6314. * to the individual cpus
  6315. */
  6316. static int build_sched_domains(const struct cpumask *cpu_map,
  6317. struct sched_domain_attr *attr)
  6318. {
  6319. enum s_alloc alloc_state = sa_none;
  6320. struct sched_domain *sd;
  6321. struct s_data d;
  6322. int i, ret = -ENOMEM;
  6323. alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
  6324. if (alloc_state != sa_rootdomain)
  6325. goto error;
  6326. /* Set up domains for cpus specified by the cpu_map. */
  6327. for_each_cpu(i, cpu_map) {
  6328. struct sched_domain_topology_level *tl;
  6329. sd = NULL;
  6330. for (tl = sched_domain_topology; tl->init; tl++) {
  6331. sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
  6332. if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
  6333. sd->flags |= SD_OVERLAP;
  6334. if (cpumask_equal(cpu_map, sched_domain_span(sd)))
  6335. break;
  6336. }
  6337. while (sd->child)
  6338. sd = sd->child;
  6339. *per_cpu_ptr(d.sd, i) = sd;
  6340. }
  6341. /* Build the groups for the domains */
  6342. for_each_cpu(i, cpu_map) {
  6343. for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
  6344. sd->span_weight = cpumask_weight(sched_domain_span(sd));
  6345. if (sd->flags & SD_OVERLAP) {
  6346. if (build_overlap_sched_groups(sd, i))
  6347. goto error;
  6348. } else {
  6349. if (build_sched_groups(sd, i))
  6350. goto error;
  6351. }
  6352. }
  6353. }
  6354. /* Calculate CPU power for physical packages and nodes */
  6355. for (i = nr_cpumask_bits-1; i >= 0; i--) {
  6356. if (!cpumask_test_cpu(i, cpu_map))
  6357. continue;
  6358. for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
  6359. claim_allocations(i, sd);
  6360. init_sched_groups_power(i, sd);
  6361. }
  6362. }
  6363. /* Attach the domains */
  6364. rcu_read_lock();
  6365. for_each_cpu(i, cpu_map) {
  6366. sd = *per_cpu_ptr(d.sd, i);
  6367. cpu_attach_domain(sd, d.rd, i);
  6368. }
  6369. rcu_read_unlock();
  6370. ret = 0;
  6371. error:
  6372. __free_domain_allocs(&d, alloc_state, cpu_map);
  6373. return ret;
  6374. }
  6375. static cpumask_var_t *doms_cur; /* current sched domains */
  6376. static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  6377. static struct sched_domain_attr *dattr_cur;
6378. /* attributes of custom domains in 'doms_cur' */
  6379. /*
  6380. * Special case: If a kmalloc of a doms_cur partition (array of
6381. * cpumask) fails, then fall back to a single sched domain,
  6382. * as determined by the single cpumask fallback_doms.
  6383. */
  6384. static cpumask_var_t fallback_doms;
  6385. /*
  6386. * arch_update_cpu_topology lets virtualized architectures update the
  6387. * cpu core maps. It is supposed to return 1 if the topology changed
  6388. * or 0 if it stayed the same.
  6389. */
  6390. int __attribute__((weak)) arch_update_cpu_topology(void)
  6391. {
  6392. return 0;
  6393. }
  6394. cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
  6395. {
  6396. int i;
  6397. cpumask_var_t *doms;
  6398. doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
  6399. if (!doms)
  6400. return NULL;
  6401. for (i = 0; i < ndoms; i++) {
  6402. if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
  6403. free_sched_domains(doms, i);
  6404. return NULL;
  6405. }
  6406. }
  6407. return doms;
  6408. }
  6409. void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  6410. {
  6411. unsigned int i;
  6412. for (i = 0; i < ndoms; i++)
  6413. free_cpumask_var(doms[i]);
  6414. kfree(doms);
  6415. }
  6416. /*
  6417. * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  6418. * For now this just excludes isolated cpus, but could be used to
  6419. * exclude other special cases in the future.
  6420. */
  6421. static int init_sched_domains(const struct cpumask *cpu_map)
  6422. {
  6423. int err;
  6424. arch_update_cpu_topology();
  6425. ndoms_cur = 1;
  6426. doms_cur = alloc_sched_domains(ndoms_cur);
  6427. if (!doms_cur)
  6428. doms_cur = &fallback_doms;
  6429. cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
  6430. dattr_cur = NULL;
  6431. err = build_sched_domains(doms_cur[0], NULL);
  6432. register_sched_domain_sysctl();
  6433. return err;
  6434. }
  6435. /*
  6436. * Detach sched domains from a group of cpus specified in cpu_map
  6437. * These cpus will now be attached to the NULL domain
  6438. */
  6439. static void detach_destroy_domains(const struct cpumask *cpu_map)
  6440. {
  6441. int i;
  6442. rcu_read_lock();
  6443. for_each_cpu(i, cpu_map)
  6444. cpu_attach_domain(NULL, &def_root_domain, i);
  6445. rcu_read_unlock();
  6446. }
  6447. /* handle null as "default" */
  6448. static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  6449. struct sched_domain_attr *new, int idx_new)
  6450. {
  6451. struct sched_domain_attr tmp;
  6452. /* fast path */
  6453. if (!new && !cur)
  6454. return 1;
  6455. tmp = SD_ATTR_INIT;
  6456. return !memcmp(cur ? (cur + idx_cur) : &tmp,
  6457. new ? (new + idx_new) : &tmp,
  6458. sizeof(struct sched_domain_attr));
  6459. }
  6460. /*
  6461. * Partition sched domains as specified by the 'ndoms_new'
6462. * cpumasks in the array doms_new[]. This compares
  6463. * doms_new[] to the current sched domain partitioning, doms_cur[].
  6464. * It destroys each deleted domain and builds each new domain.
  6465. *
  6466. * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6467. * The masks don't intersect (don't overlap). We should set up one
  6468. * sched domain for each mask. CPUs not in any of the cpumasks will
  6469. * not be load balanced. If the same cpumask appears both in the
  6470. * current 'doms_cur' domains and in the new 'doms_new', we can leave
  6471. * it as it is.
  6472. *
  6473. * The passed in 'doms_new' should be allocated using
  6474. * alloc_sched_domains. This routine takes ownership of it and will
  6475. * free_sched_domains it when done with it. If the caller failed the
  6476. * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6477. * and partition_sched_domains() will fall back to the single partition
6478. * 'fallback_doms'; it also forces the domains to be rebuilt.
  6479. *
6480. * If doms_new == NULL it will be replaced with cpu_active_mask.
  6481. * ndoms_new == 0 is a special case for destroying existing domains,
  6482. * and it will not create the default domain.
  6483. *
  6484. * Call with hotplug lock held
  6485. */
  6486. void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  6487. struct sched_domain_attr *dattr_new)
  6488. {
  6489. int i, j, n;
  6490. int new_topology;
  6491. mutex_lock(&sched_domains_mutex);
  6492. /* always unregister in case we don't destroy any domains */
  6493. unregister_sched_domain_sysctl();
  6494. /* Let architecture update cpu core mappings. */
  6495. new_topology = arch_update_cpu_topology();
  6496. n = doms_new ? ndoms_new : 0;
  6497. /* Destroy deleted domains */
  6498. for (i = 0; i < ndoms_cur; i++) {
  6499. for (j = 0; j < n && !new_topology; j++) {
  6500. if (cpumask_equal(doms_cur[i], doms_new[j])
  6501. && dattrs_equal(dattr_cur, i, dattr_new, j))
  6502. goto match1;
  6503. }
  6504. /* no match - a current sched domain not in new doms_new[] */
  6505. detach_destroy_domains(doms_cur[i]);
  6506. match1:
  6507. ;
  6508. }
  6509. if (doms_new == NULL) {
  6510. ndoms_cur = 0;
  6511. doms_new = &fallback_doms;
  6512. cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
  6513. WARN_ON_ONCE(dattr_new);
  6514. }
  6515. /* Build new domains */
  6516. for (i = 0; i < ndoms_new; i++) {
  6517. for (j = 0; j < ndoms_cur && !new_topology; j++) {
  6518. if (cpumask_equal(doms_new[i], doms_cur[j])
  6519. && dattrs_equal(dattr_new, i, dattr_cur, j))
  6520. goto match2;
  6521. }
  6522. /* no match - add a new doms_new */
  6523. build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
  6524. match2:
  6525. ;
  6526. }
  6527. /* Remember the new sched domains */
  6528. if (doms_cur != &fallback_doms)
  6529. free_sched_domains(doms_cur, ndoms_cur);
  6530. kfree(dattr_cur); /* kfree(NULL) is safe */
  6531. doms_cur = doms_new;
  6532. dattr_cur = dattr_new;
  6533. ndoms_cur = ndoms_new;
  6534. register_sched_domain_sysctl();
  6535. mutex_unlock(&sched_domains_mutex);
  6536. }
  6537. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  6538. static void reinit_sched_domains(void)
  6539. {
  6540. get_online_cpus();
  6541. /* Destroy domains first to force the rebuild */
  6542. partition_sched_domains(0, NULL, NULL);
  6543. rebuild_sched_domains();
  6544. put_online_cpus();
  6545. }
  6546. static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
  6547. {
  6548. unsigned int level = 0;
  6549. if (sscanf(buf, "%u", &level) != 1)
  6550. return -EINVAL;
  6551. /*
6552. * level is always positive, so don't check for
6553. * level < POWERSAVINGS_BALANCE_NONE, which is 0.
6554. * What happens on a 0 or 1 byte write?
6555. * Do we need to check count as well?
  6556. */
  6557. if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
  6558. return -EINVAL;
  6559. if (smt)
  6560. sched_smt_power_savings = level;
  6561. else
  6562. sched_mc_power_savings = level;
  6563. reinit_sched_domains();
  6564. return count;
  6565. }
  6566. #ifdef CONFIG_SCHED_MC
  6567. static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
  6568. struct sysdev_class_attribute *attr,
  6569. char *page)
  6570. {
  6571. return sprintf(page, "%u\n", sched_mc_power_savings);
  6572. }
  6573. static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
  6574. struct sysdev_class_attribute *attr,
  6575. const char *buf, size_t count)
  6576. {
  6577. return sched_power_savings_store(buf, count, 0);
  6578. }
  6579. static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
  6580. sched_mc_power_savings_show,
  6581. sched_mc_power_savings_store);
  6582. #endif
  6583. #ifdef CONFIG_SCHED_SMT
  6584. static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
  6585. struct sysdev_class_attribute *attr,
  6586. char *page)
  6587. {
  6588. return sprintf(page, "%u\n", sched_smt_power_savings);
  6589. }
  6590. static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
  6591. struct sysdev_class_attribute *attr,
  6592. const char *buf, size_t count)
  6593. {
  6594. return sched_power_savings_store(buf, count, 1);
  6595. }
  6596. static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
  6597. sched_smt_power_savings_show,
  6598. sched_smt_power_savings_store);
  6599. #endif
  6600. int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
  6601. {
  6602. int err = 0;
  6603. #ifdef CONFIG_SCHED_SMT
  6604. if (smt_capable())
  6605. err = sysfs_create_file(&cls->kset.kobj,
  6606. &attr_sched_smt_power_savings.attr);
  6607. #endif
  6608. #ifdef CONFIG_SCHED_MC
  6609. if (!err && mc_capable())
  6610. err = sysfs_create_file(&cls->kset.kobj,
  6611. &attr_sched_mc_power_savings.attr);
  6612. #endif
  6613. return err;
  6614. }
  6615. #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
  6616. /*
  6617. * Update cpusets according to cpu_active mask. If cpusets are
  6618. * disabled, cpuset_update_active_cpus() becomes a simple wrapper
  6619. * around partition_sched_domains().
  6620. */
  6621. static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
  6622. void *hcpu)
  6623. {
  6624. switch (action & ~CPU_TASKS_FROZEN) {
  6625. case CPU_ONLINE:
  6626. case CPU_DOWN_FAILED:
  6627. cpuset_update_active_cpus();
  6628. return NOTIFY_OK;
  6629. default:
  6630. return NOTIFY_DONE;
  6631. }
  6632. }
  6633. static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
  6634. void *hcpu)
  6635. {
  6636. switch (action & ~CPU_TASKS_FROZEN) {
  6637. case CPU_DOWN_PREPARE:
  6638. cpuset_update_active_cpus();
  6639. return NOTIFY_OK;
  6640. default:
  6641. return NOTIFY_DONE;
  6642. }
  6643. }
  6644. static int update_runtime(struct notifier_block *nfb,
  6645. unsigned long action, void *hcpu)
  6646. {
  6647. int cpu = (int)(long)hcpu;
  6648. switch (action) {
  6649. case CPU_DOWN_PREPARE:
  6650. case CPU_DOWN_PREPARE_FROZEN:
  6651. disable_runtime(cpu_rq(cpu));
  6652. return NOTIFY_OK;
  6653. case CPU_DOWN_FAILED:
  6654. case CPU_DOWN_FAILED_FROZEN:
  6655. case CPU_ONLINE:
  6656. case CPU_ONLINE_FROZEN:
  6657. enable_runtime(cpu_rq(cpu));
  6658. return NOTIFY_OK;
  6659. default:
  6660. return NOTIFY_DONE;
  6661. }
  6662. }
  6663. void __init sched_init_smp(void)
  6664. {
  6665. cpumask_var_t non_isolated_cpus;
  6666. alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
  6667. alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
  6668. get_online_cpus();
  6669. mutex_lock(&sched_domains_mutex);
  6670. init_sched_domains(cpu_active_mask);
  6671. cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
  6672. if (cpumask_empty(non_isolated_cpus))
  6673. cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
  6674. mutex_unlock(&sched_domains_mutex);
  6675. put_online_cpus();
  6676. hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
  6677. hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
  6678. /* RT runtime code needs to handle some hotplug events */
  6679. hotcpu_notifier(update_runtime, 0);
  6680. init_hrtick();
  6681. /* Move init over to a non-isolated CPU */
  6682. if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
  6683. BUG();
  6684. sched_init_granularity();
  6685. free_cpumask_var(non_isolated_cpus);
  6686. init_sched_rt_class();
  6687. }
  6688. #else
  6689. void __init sched_init_smp(void)
  6690. {
  6691. sched_init_granularity();
  6692. }
  6693. #endif /* CONFIG_SMP */
  6694. const_debug unsigned int sysctl_timer_migration = 1;
  6695. int in_sched_functions(unsigned long addr)
  6696. {
  6697. return in_lock_functions(addr) ||
  6698. (addr >= (unsigned long)__sched_text_start
  6699. && addr < (unsigned long)__sched_text_end);
  6700. }
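/*
 * Run-queue sub-structure constructors used by sched_init(): reset the
 * CFS timeline and the RT priority array to their empty state.
 */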
  6701. static void init_cfs_rq(struct cfs_rq *cfs_rq)
  6702. {
  6703. cfs_rq->tasks_timeline = RB_ROOT;
  6704. INIT_LIST_HEAD(&cfs_rq->tasks);
  6705. cfs_rq->min_vruntime = (u64)(-(1LL << 20));
  6706. #ifndef CONFIG_64BIT
  6707. cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
  6708. #endif
  6709. }
  6710. static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
  6711. {
  6712. struct rt_prio_array *array;
  6713. int i;
  6714. array = &rt_rq->active;
  6715. for (i = 0; i < MAX_RT_PRIO; i++) {
  6716. INIT_LIST_HEAD(array->queue + i);
  6717. __clear_bit(i, array->bitmap);
  6718. }
  6719. /* delimiter for bitsearch: */
  6720. __set_bit(MAX_RT_PRIO, array->bitmap);
  6721. #if defined CONFIG_SMP
  6722. rt_rq->highest_prio.curr = MAX_RT_PRIO;
  6723. rt_rq->highest_prio.next = MAX_RT_PRIO;
  6724. rt_rq->rt_nr_migratory = 0;
  6725. rt_rq->overloaded = 0;
  6726. plist_head_init(&rt_rq->pushable_tasks);
  6727. #endif
  6728. rt_rq->rt_time = 0;
  6729. rt_rq->rt_throttled = 0;
  6730. rt_rq->rt_runtime = 0;
  6731. raw_spin_lock_init(&rt_rq->rt_runtime_lock);
  6732. }
  6733. #ifdef CONFIG_FAIR_GROUP_SCHED
  6734. static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
  6735. struct sched_entity *se, int cpu,
  6736. struct sched_entity *parent)
  6737. {
  6738. struct rq *rq = cpu_rq(cpu);
  6739. cfs_rq->tg = tg;
  6740. cfs_rq->rq = rq;
  6741. #ifdef CONFIG_SMP
  6742. /* allow initial update_cfs_load() to truncate */
  6743. cfs_rq->load_stamp = 1;
  6744. #endif
  6745. tg->cfs_rq[cpu] = cfs_rq;
  6746. tg->se[cpu] = se;
  6747. /* se could be NULL for root_task_group */
  6748. if (!se)
  6749. return;
  6750. if (!parent)
  6751. se->cfs_rq = &rq->cfs;
  6752. else
  6753. se->cfs_rq = parent->my_q;
  6754. se->my_q = cfs_rq;
  6755. update_load_set(&se->load, 0);
  6756. se->parent = parent;
  6757. }
  6758. #endif
  6759. #ifdef CONFIG_RT_GROUP_SCHED
  6760. static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
  6761. struct sched_rt_entity *rt_se, int cpu,
  6762. struct sched_rt_entity *parent)
  6763. {
  6764. struct rq *rq = cpu_rq(cpu);
  6765. rt_rq->highest_prio.curr = MAX_RT_PRIO;
  6766. rt_rq->rt_nr_boosted = 0;
  6767. rt_rq->rq = rq;
  6768. rt_rq->tg = tg;
  6769. tg->rt_rq[cpu] = rt_rq;
  6770. tg->rt_se[cpu] = rt_se;
  6771. if (!rt_se)
  6772. return;
  6773. if (!parent)
  6774. rt_se->rt_rq = &rq->rt;
  6775. else
  6776. rt_se->rt_rq = parent->my_q;
  6777. rt_se->my_q = rt_rq;
  6778. rt_se->parent = parent;
  6779. INIT_LIST_HEAD(&rt_se->run_list);
  6780. }
  6781. #endif
  6782. void __init sched_init(void)
  6783. {
  6784. int i, j;
  6785. unsigned long alloc_size = 0, ptr;
  6786. #ifdef CONFIG_FAIR_GROUP_SCHED
  6787. alloc_size += 2 * nr_cpu_ids * sizeof(void **);
  6788. #endif
  6789. #ifdef CONFIG_RT_GROUP_SCHED
  6790. alloc_size += 2 * nr_cpu_ids * sizeof(void **);
  6791. #endif
  6792. #ifdef CONFIG_CPUMASK_OFFSTACK
  6793. alloc_size += num_possible_cpus() * cpumask_size();
  6794. #endif
  6795. if (alloc_size) {
  6796. ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
  6797. #ifdef CONFIG_FAIR_GROUP_SCHED
  6798. root_task_group.se = (struct sched_entity **)ptr;
  6799. ptr += nr_cpu_ids * sizeof(void **);
  6800. root_task_group.cfs_rq = (struct cfs_rq **)ptr;
  6801. ptr += nr_cpu_ids * sizeof(void **);
  6802. #endif /* CONFIG_FAIR_GROUP_SCHED */
  6803. #ifdef CONFIG_RT_GROUP_SCHED
  6804. root_task_group.rt_se = (struct sched_rt_entity **)ptr;
  6805. ptr += nr_cpu_ids * sizeof(void **);
  6806. root_task_group.rt_rq = (struct rt_rq **)ptr;
  6807. ptr += nr_cpu_ids * sizeof(void **);
  6808. #endif /* CONFIG_RT_GROUP_SCHED */
  6809. #ifdef CONFIG_CPUMASK_OFFSTACK
  6810. for_each_possible_cpu(i) {
  6811. per_cpu(load_balance_tmpmask, i) = (void *)ptr;
  6812. ptr += cpumask_size();
  6813. }
  6814. #endif /* CONFIG_CPUMASK_OFFSTACK */
  6815. }
  6816. #ifdef CONFIG_SMP
  6817. init_defrootdomain();
  6818. #endif
  6819. init_rt_bandwidth(&def_rt_bandwidth,
  6820. global_rt_period(), global_rt_runtime());
  6821. #ifdef CONFIG_RT_GROUP_SCHED
  6822. init_rt_bandwidth(&root_task_group.rt_bandwidth,
  6823. global_rt_period(), global_rt_runtime());
  6824. #endif /* CONFIG_RT_GROUP_SCHED */
  6825. #ifdef CONFIG_CGROUP_SCHED
  6826. list_add(&root_task_group.list, &task_groups);
  6827. INIT_LIST_HEAD(&root_task_group.children);
  6828. autogroup_init(&init_task);
  6829. #endif /* CONFIG_CGROUP_SCHED */
  6830. for_each_possible_cpu(i) {
  6831. struct rq *rq;
  6832. rq = cpu_rq(i);
  6833. raw_spin_lock_init(&rq->lock);
  6834. rq->nr_running = 0;
  6835. rq->calc_load_active = 0;
  6836. rq->calc_load_update = jiffies + LOAD_FREQ;
  6837. init_cfs_rq(&rq->cfs);
  6838. init_rt_rq(&rq->rt, rq);
  6839. #ifdef CONFIG_FAIR_GROUP_SCHED
  6840. root_task_group.shares = root_task_group_load;
  6841. INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
  6842. /*
  6843. * How much cpu bandwidth does root_task_group get?
  6844. *
6845. * In case of task-groups formed through the cgroup filesystem, it
  6846. * gets 100% of the cpu resources in the system. This overall
  6847. * system cpu resource is divided among the tasks of
  6848. * root_task_group and its child task-groups in a fair manner,
  6849. * based on each entity's (task or task-group's) weight
  6850. * (se->load.weight).
  6851. *
  6852. * In other words, if root_task_group has 10 tasks of weight
6853. * 1024 and two child groups A0 and A1 (of weight 1024 each),
  6854. * then A0's share of the cpu resource is:
  6855. *
  6856. * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
  6857. *
  6858. * We achieve this by letting root_task_group's tasks sit
  6859. * directly in rq->cfs (i.e root_task_group->se[] = NULL).
  6860. */
  6861. init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
  6862. #endif /* CONFIG_FAIR_GROUP_SCHED */
  6863. rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
  6864. #ifdef CONFIG_RT_GROUP_SCHED
  6865. INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
  6866. init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
  6867. #endif
  6868. for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
  6869. rq->cpu_load[j] = 0;
  6870. rq->last_load_update_tick = jiffies;
  6871. #ifdef CONFIG_SMP
  6872. rq->sd = NULL;
  6873. rq->rd = NULL;
  6874. rq->cpu_power = SCHED_POWER_SCALE;
  6875. rq->post_schedule = 0;
  6876. rq->active_balance = 0;
  6877. rq->next_balance = jiffies;
  6878. rq->push_cpu = 0;
  6879. rq->cpu = i;
  6880. rq->online = 0;
  6881. rq->idle_stamp = 0;
  6882. rq->avg_idle = 2*sysctl_sched_migration_cost;
  6883. rq_attach_root(rq, &def_root_domain);
  6884. #ifdef CONFIG_NO_HZ
  6885. rq->nohz_balance_kick = 0;
  6886. init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
  6887. #endif
  6888. #endif
  6889. init_rq_hrtick(rq);
  6890. atomic_set(&rq->nr_iowait, 0);
  6891. }
  6892. set_load_weight(&init_task);
  6893. #ifdef CONFIG_PREEMPT_NOTIFIERS
  6894. INIT_HLIST_HEAD(&init_task.preempt_notifiers);
  6895. #endif
  6896. #ifdef CONFIG_SMP
  6897. open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
  6898. #endif
  6899. #ifdef CONFIG_RT_MUTEXES
  6900. plist_head_init(&init_task.pi_waiters);
  6901. #endif
  6902. /*
  6903. * The boot idle thread does lazy MMU switching as well:
  6904. */
  6905. atomic_inc(&init_mm.mm_count);
  6906. enter_lazy_tlb(&init_mm, current);
  6907. /*
  6908. * Make us the idle thread. Technically, schedule() should not be
6909. * called from this thread; however, somewhere below it might be,
  6910. * but because we are the idle thread, we just pick up running again
  6911. * when this runqueue becomes "idle".
  6912. */
  6913. init_idle(current, smp_processor_id());
  6914. calc_load_update = jiffies + LOAD_FREQ;
  6915. /*
  6916. * During early bootup we pretend to be a normal task:
  6917. */
  6918. current->sched_class = &fair_sched_class;
  6919. /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
  6920. zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
  6921. #ifdef CONFIG_SMP
  6922. zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
  6923. #ifdef CONFIG_NO_HZ
  6924. zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
  6925. alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
  6926. atomic_set(&nohz.load_balancer, nr_cpu_ids);
  6927. atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
  6928. atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
  6929. #endif
  6930. /* May be allocated at isolcpus cmdline parse time */
  6931. if (cpu_isolated_map == NULL)
  6932. zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
  6933. #endif /* SMP */
  6934. scheduler_running = 1;
  6935. }
  6936. #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  6937. static inline int preempt_count_equals(int preempt_offset)
  6938. {
  6939. int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
  6940. return (nested == preempt_offset);
  6941. }
  6942. void __might_sleep(const char *file, int line, int preempt_offset)
  6943. {
  6944. static unsigned long prev_jiffy; /* ratelimiting */
  6945. if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
  6946. system_state != SYSTEM_RUNNING || oops_in_progress)
  6947. return;
  6948. if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
  6949. return;
  6950. prev_jiffy = jiffies;
  6951. printk(KERN_ERR
  6952. "BUG: sleeping function called from invalid context at %s:%d\n",
  6953. file, line);
  6954. printk(KERN_ERR
  6955. "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
  6956. in_atomic(), irqs_disabled(),
  6957. current->pid, current->comm);
  6958. debug_show_held_locks(current);
  6959. if (irqs_disabled())
  6960. print_irqtrace_events(current);
  6961. dump_stack();
  6962. }
  6963. EXPORT_SYMBOL(__might_sleep);
  6964. #endif
  6965. #ifdef CONFIG_MAGIC_SYSRQ
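/*
 * SysRq helper: push @p back to SCHED_NORMAL, requeueing it if it was
 * on a runqueue so the change takes effect immediately.
 */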
static void normalize_task(struct rq *rq, struct task_struct *p)
{
	const struct sched_class *prev_class = p->sched_class;
	int old_prio = p->prio;
	int on_rq;

	on_rq = p->on_rq;
	if (on_rq)
		deactivate_task(rq, p, 0);
	__setscheduler(rq, p, SCHED_NORMAL, 0);
	if (on_rq) {
		activate_task(rq, p, 0);
		resched_task(rq->curr);
	}

	check_class_changed(rq, p, prev_class, old_prio);
}

void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

		p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
		p->se.statistics.wait_start = 0;
		p->se.statistics.sleep_start = 0;
		p->se.statistics.block_start = 0;
#endif

		if (!rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (TASK_NICE(p) < 0 && p->mm)
				set_user_nice(p, 0);
			continue;
		}

		raw_spin_lock(&p->pi_lock);
		rq = __task_rq_lock(p);

		normalize_task(rq, p);

		__task_rq_unlock(rq);
		raw_spin_unlock(&p->pi_lock);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
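/*
 * Free the per-cpu cfs_rq and sched_entity arrays of a task group.
 * Safe to call on a partially constructed group, hence the NULL checks.
 */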
static void free_fair_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}
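/*
 * Allocate and initialize the per-cpu cfs_rq/sched_entity pairs for a new
 * task group and link them below @parent. Returns 1 on success and 0 on
 * allocation failure; the caller cleans up via free_fair_sched_group().
 */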
static
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}

static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline void free_fair_sched_group(struct task_group *tg)
{
}

static inline
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
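/*
 * Tear down a task group's realtime scheduling state: destroy its
 * bandwidth timer and free the per-cpu rt_rq/rt_se arrays.
 */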
static void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}
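/*
 * Allocate the per-cpu rt_rq/sched_rt_entity pairs for a new task group,
 * inheriting the default bandwidth period with zero runtime. Returns 1 on
 * success, 0 on allocation failure.
 */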
static
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			  ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline void free_rt_sched_group(struct task_group *tg)
{
}

static inline
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kfree(tg);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;
	unsigned long flags;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	WARN_ON(!parent); /* root should already exist */

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	return tg;

err:
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}

/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
	/* now it should be safe to free those cfs_rqs */
	free_sched_group(container_of(rhp, struct task_group, rcu));
}

/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
	unsigned long flags;
	int i;

	/* end participation in shares distribution */
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);

	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);

	/* wait for possible concurrent references to cfs_rqs to complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}
/*
 * Change a task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	running = task_current(rq, tsk);
	on_rq = tsk->on_rq;

	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk, on_rq);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static DEFINE_MUTEX(shares_mutex);
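/*
 * Update a task group's weight ("cpu.shares"). The value is clamped to
 * [MIN_SHARES, MAX_SHARES] and the new weight is propagated to the group's
 * scheduling entities on every cpu.
 */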
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}

unsigned long sched_group_shares(struct task_group *tg)
{
	return tg->shares;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};
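/*
 * walk_tg_tree() callback: verify that applying the new period/runtime in
 * @data to d->tg keeps every group within the global limit and that no
 * group's children together claim more bandwidth than the group itself.
 */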
static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
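/*
 * Validate a new period/runtime pair against the whole hierarchy and, if
 * acceptable, apply it to the group's bandwidth and to each per-cpu rt_rq.
 */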
static int tg_set_bandwidth(struct task_group *tg,
			    u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}

static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}
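/*
 * cgroup attach check: a realtime task may only join a group that has a
 * non-zero runtime allocation, otherwise it could never run there.
 */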
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There are always some RT tasks in the root group
	 * -- migration, kstopmachine etc..
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
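/*
 * sysctl handler for sched_rt_period_us / sched_rt_runtime_us: parse the
 * new values, verify they keep the RT groups schedulable, and either roll
 * back on failure or propagate them to the default bandwidth.
 */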
int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}
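/*
 * cgroup create callback: the root cgroup maps onto the statically
 * allocated root_task_group; every other cgroup gets a freshly created
 * task group parented below its cgroup parent.
 */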
static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
		return -EINVAL;
#else
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
		return -EINVAL;
#endif
	return 0;
}

static void
cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	sched_move_task(tsk);
}
static void
cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	sched_move_task(task);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) scale_load_down(tg->shares);
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
				    u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};

static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.create		= cpu_cgroup_create,
	.destroy	= cpu_cgroup_destroy,
	.can_attach_task = cpu_cgroup_can_attach_task,
	.attach_task	= cpu_cgroup_attach_task,
	.exit		= cpu_cgroup_exit,
	.populate	= cpu_cgroup_populate,
	.subsys_id	= cpu_cgroup_subsys_id,
	.early_init	= 1,
};
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
	struct cpuacct *parent;
};

struct cgroup_subsys cpuacct_subsys;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	int i;

	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		if (percpu_counter_init(&ca->cpustat[i], 0))
			goto out_free_counters;

	if (cgrp->parent)
		ca->parent = cgroup_ca(cgrp->parent);

	return &ca->css;

out_free_counters:
	while (--i >= 0)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
	kfree(ca);
}
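/*
 * Per-cpu usage accessors: on 32-bit platforms the 64-bit counter is read
 * and written under rq->lock so it cannot be observed half-updated.
 */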
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}
static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
		s64 val = percpu_counter_read(&ca->cpustat[i]);
		val = cputime64_to_clock_t(val);
		cb->fill(cb, cpuacct_stat_desc[i], val);
	}
	return 0;
}

static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = ca->parent) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
 * in cputime_t units. As a result, cpuacct_update_stats calls
 * percpu_counter_add with values large enough to always overflow the
 * per cpu batch limit causing bad SMP scalability.
 *
 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
 */
#ifdef CONFIG_SMP
#define CPUACCT_BATCH	\
	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH	0
#endif

/*
 * Charge the system/user time to the task's accounting group.
 */
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val)
{
	struct cpuacct *ca;
	int batch = CPUACCT_BATCH;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(tsk);

	do {
		__percpu_counter_add(&ca->cpustat[idx], val, batch);
		ca = ca->parent;
	} while (ca);
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name		= "cpuacct",
	.create		= cpuacct_create,
	.destroy	= cpuacct_destroy,
	.populate	= cpuacct_populate,
	.subsys_id	= cpuacct_subsys_id,
};
#endif	/* CONFIG_CGROUP_CPUACCT */