perf_event.c 141 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047
214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095
61056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765777577857795780578157825783578457855786578757885789579057915792579357945795579657975798579958005801580258035804580558065807580858095810581158125813581458155816581758185819582058215822582358245825582658275828582958305831583258335834583558365837583858395840584158425843584458455846584758485849585058515852585358545855585658575858585958605861586258635864586558665867586858695870587158725873587458755876587758785879588058815882588358845885588658875888588958905891589258935894589558965897589858995900590159025903590459055906590759085909591059115912591359145915591659175918591959205921592259235924592559265927592859295930593159325933593459355936593759385939594059415942594359445945594659475948594959505951595259535954595559565957595859595960596159625963596459655966596759685969597059715972597359745975597659775978597959805981598259835984598559865987598859895990599159925993599459955996599759985999600060016002600360046005600660076008600960106011601260136014601560166017601860196020602160226023602460256026602760286029603060316032603360346035603660376038603960406041604260436044604560466047604860496050605160526053605460556056605760586059606060616062606360646065606660676068606960706071607260736074607560766077607860796080608160826083608460856086608760886089609060916092609360946095609660976098609961006101610261036104610561066107610861096110611161126113611461156116611761186119612061216122612361246125612661276128612961306131613261336134613561366137613861396140614161426143614461456146614761486149615061516152615361546155615661576158615961606161616261636164616561666167616861696170617161726173617461756176617761786179618061816182618361846185618661876188618961906191619261936194619561966197619861996200620162026203620462056206620762086209621062116212621362146215621662176218621962206221622262236224622562266227622862296230623162326233623462356236623762386239624062416242624362446245624662476248624962506251625262536254625562566257625862596260626162626263626462656266626762686269627062716272627362746275
  1. /*
  2. * Performance events core code:
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/file.h>
  16. #include <linux/poll.h>
  17. #include <linux/slab.h>
  18. #include <linux/hash.h>
  19. #include <linux/sysfs.h>
  20. #include <linux/dcache.h>
  21. #include <linux/percpu.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/vmstat.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/hardirq.h>
  26. #include <linux/rculist.h>
  27. #include <linux/uaccess.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/anon_inodes.h>
  30. #include <linux/kernel_stat.h>
  31. #include <linux/perf_event.h>
  32. #include <linux/ftrace_event.h>
  33. #include <asm/irq_regs.h>
  34. static atomic_t nr_events __read_mostly;
  35. static atomic_t nr_mmap_events __read_mostly;
  36. static atomic_t nr_comm_events __read_mostly;
  37. static atomic_t nr_task_events __read_mostly;
  38. static LIST_HEAD(pmus);
  39. static DEFINE_MUTEX(pmus_lock);
  40. static struct srcu_struct pmus_srcu;
  41. /*
  42. * perf event paranoia level:
  43. * -1 - not paranoid at all
  44. * 0 - disallow raw tracepoint access for unpriv
  45. * 1 - disallow cpu events for unpriv
  46. * 2 - disallow kernel profiling for unpriv
  47. */
  48. int sysctl_perf_event_paranoid __read_mostly = 1;
  49. int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
  50. /*
  51. * max perf event sample rate
  52. */
  53. int sysctl_perf_event_sample_rate __read_mostly = 100000;
  54. static atomic64_t perf_event_id;
  55. void __weak perf_event_print_debug(void) { }
  56. void perf_pmu_disable(struct pmu *pmu)
  57. {
  58. int *count = this_cpu_ptr(pmu->pmu_disable_count);
  59. if (!(*count)++)
  60. pmu->pmu_disable(pmu);
  61. }
  62. void perf_pmu_enable(struct pmu *pmu)
  63. {
  64. int *count = this_cpu_ptr(pmu->pmu_disable_count);
  65. if (!--(*count))
  66. pmu->pmu_enable(pmu);
  67. }
  68. static void perf_pmu_rotate_start(struct pmu *pmu)
  69. {
  70. struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  71. if (hrtimer_active(&cpuctx->timer))
  72. return;
  73. __hrtimer_start_range_ns(&cpuctx->timer,
  74. ns_to_ktime(cpuctx->timer_interval), 0,
  75. HRTIMER_MODE_REL_PINNED, 0);
  76. }
  77. static void perf_pmu_rotate_stop(struct pmu *pmu)
  78. {
  79. struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  80. hrtimer_cancel(&cpuctx->timer);
  81. }
  82. static void get_ctx(struct perf_event_context *ctx)
  83. {
  84. WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  85. }
  86. static void free_ctx(struct rcu_head *head)
  87. {
  88. struct perf_event_context *ctx;
  89. ctx = container_of(head, struct perf_event_context, rcu_head);
  90. kfree(ctx);
  91. }
  92. static void put_ctx(struct perf_event_context *ctx)
  93. {
  94. if (atomic_dec_and_test(&ctx->refcount)) {
  95. if (ctx->parent_ctx)
  96. put_ctx(ctx->parent_ctx);
  97. if (ctx->task)
  98. put_task_struct(ctx->task);
  99. call_rcu(&ctx->rcu_head, free_ctx);
  100. }
  101. }
  102. static void unclone_ctx(struct perf_event_context *ctx)
  103. {
  104. if (ctx->parent_ctx) {
  105. put_ctx(ctx->parent_ctx);
  106. ctx->parent_ctx = NULL;
  107. }
  108. }
  109. /*
  110. * If we inherit events we want to return the parent event id
  111. * to userspace.
  112. */
  113. static u64 primary_event_id(struct perf_event *event)
  114. {
  115. u64 id = event->id;
  116. if (event->parent)
  117. id = event->parent->id;
  118. return id;
  119. }
  120. /*
  121. * Get the perf_event_context for a task and lock it.
  122. * This has to cope with with the fact that until it is locked,
  123. * the context could get moved to another task.
  124. */
  125. static struct perf_event_context *
  126. perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
  127. {
  128. struct perf_event_context *ctx;
  129. rcu_read_lock();
  130. retry:
  131. ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
  132. if (ctx) {
  133. /*
  134. * If this context is a clone of another, it might
  135. * get swapped for another underneath us by
  136. * perf_event_task_sched_out, though the
  137. * rcu_read_lock() protects us from any context
  138. * getting freed. Lock the context and check if it
  139. * got swapped before we could get the lock, and retry
  140. * if so. If we locked the right context, then it
  141. * can't get swapped on us any more.
  142. */
  143. raw_spin_lock_irqsave(&ctx->lock, *flags);
  144. if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
  145. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  146. goto retry;
  147. }
  148. if (!atomic_inc_not_zero(&ctx->refcount)) {
  149. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  150. ctx = NULL;
  151. }
  152. }
  153. rcu_read_unlock();
  154. return ctx;
  155. }
  156. /*
  157. * Get the context for a task and increment its pin_count so it
  158. * can't get swapped to another task. This also increments its
  159. * reference count so that the context can't get freed.
  160. */
  161. static struct perf_event_context *
  162. perf_pin_task_context(struct task_struct *task, int ctxn)
  163. {
  164. struct perf_event_context *ctx;
  165. unsigned long flags;
  166. ctx = perf_lock_task_context(task, ctxn, &flags);
  167. if (ctx) {
  168. ++ctx->pin_count;
  169. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  170. }
  171. return ctx;
  172. }
  173. static void perf_unpin_context(struct perf_event_context *ctx)
  174. {
  175. unsigned long flags;
  176. raw_spin_lock_irqsave(&ctx->lock, flags);
  177. --ctx->pin_count;
  178. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  179. put_ctx(ctx);
  180. }
  181. static inline u64 perf_clock(void)
  182. {
  183. return local_clock();
  184. }
  185. /*
  186. * Update the record of the current time in a context.
  187. */
  188. static void update_context_time(struct perf_event_context *ctx)
  189. {
  190. u64 now = perf_clock();
  191. ctx->time += now - ctx->timestamp;
  192. ctx->timestamp = now;
  193. }
  194. /*
  195. * Update the total_time_enabled and total_time_running fields for a event.
  196. */
  197. static void update_event_times(struct perf_event *event)
  198. {
  199. struct perf_event_context *ctx = event->ctx;
  200. u64 run_end;
  201. if (event->state < PERF_EVENT_STATE_INACTIVE ||
  202. event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
  203. return;
  204. if (ctx->is_active)
  205. run_end = ctx->time;
  206. else
  207. run_end = event->tstamp_stopped;
  208. event->total_time_enabled = run_end - event->tstamp_enabled;
  209. if (event->state == PERF_EVENT_STATE_INACTIVE)
  210. run_end = event->tstamp_stopped;
  211. else
  212. run_end = ctx->time;
  213. event->total_time_running = run_end - event->tstamp_running;
  214. }
  215. /*
  216. * Update total_time_enabled and total_time_running for all events in a group.
  217. */
  218. static void update_group_times(struct perf_event *leader)
  219. {
  220. struct perf_event *event;
  221. update_event_times(leader);
  222. list_for_each_entry(event, &leader->sibling_list, group_entry)
  223. update_event_times(event);
  224. }
  225. static struct list_head *
  226. ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
  227. {
  228. if (event->attr.pinned)
  229. return &ctx->pinned_groups;
  230. else
  231. return &ctx->flexible_groups;
  232. }
  233. /*
  234. * Add a event from the lists for its context.
  235. * Must be called with ctx->mutex and ctx->lock held.
  236. */
  237. static void
  238. list_add_event(struct perf_event *event, struct perf_event_context *ctx)
  239. {
  240. WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
  241. event->attach_state |= PERF_ATTACH_CONTEXT;
  242. /*
  243. * If we're a stand alone event or group leader, we go to the context
  244. * list, group events are kept attached to the group so that
  245. * perf_group_detach can, at all times, locate all siblings.
  246. */
  247. if (event->group_leader == event) {
  248. struct list_head *list;
  249. if (is_software_event(event))
  250. event->group_flags |= PERF_GROUP_SOFTWARE;
  251. list = ctx_group_list(event, ctx);
  252. list_add_tail(&event->group_entry, list);
  253. }
  254. list_add_rcu(&event->event_entry, &ctx->event_list);
  255. if (!ctx->nr_events)
  256. perf_pmu_rotate_start(ctx->pmu);
  257. ctx->nr_events++;
  258. if (event->attr.inherit_stat)
  259. ctx->nr_stat++;
  260. }
  261. static void perf_group_attach(struct perf_event *event)
  262. {
  263. struct perf_event *group_leader = event->group_leader;
  264. WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
  265. event->attach_state |= PERF_ATTACH_GROUP;
  266. if (group_leader == event)
  267. return;
  268. if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
  269. !is_software_event(event))
  270. group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
  271. list_add_tail(&event->group_entry, &group_leader->sibling_list);
  272. group_leader->nr_siblings++;
  273. }
  274. /*
  275. * Remove a event from the lists for its context.
  276. * Must be called with ctx->mutex and ctx->lock held.
  277. */
  278. static void
  279. list_del_event(struct perf_event *event, struct perf_event_context *ctx)
  280. {
  281. /*
  282. * We can have double detach due to exit/hot-unplug + close.
  283. */
  284. if (!(event->attach_state & PERF_ATTACH_CONTEXT))
  285. return;
  286. event->attach_state &= ~PERF_ATTACH_CONTEXT;
  287. ctx->nr_events--;
  288. if (event->attr.inherit_stat)
  289. ctx->nr_stat--;
  290. list_del_rcu(&event->event_entry);
  291. if (event->group_leader == event)
  292. list_del_init(&event->group_entry);
  293. update_group_times(event);
  294. /*
  295. * If event was in error state, then keep it
  296. * that way, otherwise bogus counts will be
  297. * returned on read(). The only way to get out
  298. * of error state is by explicit re-enabling
  299. * of the event
  300. */
  301. if (event->state > PERF_EVENT_STATE_OFF)
  302. event->state = PERF_EVENT_STATE_OFF;
  303. }
  304. static void perf_group_detach(struct perf_event *event)
  305. {
  306. struct perf_event *sibling, *tmp;
  307. struct list_head *list = NULL;
  308. /*
  309. * We can have double detach due to exit/hot-unplug + close.
  310. */
  311. if (!(event->attach_state & PERF_ATTACH_GROUP))
  312. return;
  313. event->attach_state &= ~PERF_ATTACH_GROUP;
  314. /*
  315. * If this is a sibling, remove it from its group.
  316. */
  317. if (event->group_leader != event) {
  318. list_del_init(&event->group_entry);
  319. event->group_leader->nr_siblings--;
  320. return;
  321. }
  322. if (!list_empty(&event->group_entry))
  323. list = &event->group_entry;
  324. /*
  325. * If this was a group event with sibling events then
  326. * upgrade the siblings to singleton events by adding them
  327. * to whatever list we are on.
  328. */
  329. list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
  330. if (list)
  331. list_move_tail(&sibling->group_entry, list);
  332. sibling->group_leader = sibling;
  333. /* Inherit group flags from the previous leader */
  334. sibling->group_flags = event->group_flags;
  335. }
  336. }
  337. static inline int
  338. event_filter_match(struct perf_event *event)
  339. {
  340. return event->cpu == -1 || event->cpu == smp_processor_id();
  341. }
  342. static void
  343. event_sched_out(struct perf_event *event,
  344. struct perf_cpu_context *cpuctx,
  345. struct perf_event_context *ctx)
  346. {
  347. u64 delta;
  348. /*
  349. * An event which could not be activated because of
  350. * filter mismatch still needs to have its timings
  351. * maintained, otherwise bogus information is return
  352. * via read() for time_enabled, time_running:
  353. */
  354. if (event->state == PERF_EVENT_STATE_INACTIVE
  355. && !event_filter_match(event)) {
  356. delta = ctx->time - event->tstamp_stopped;
  357. event->tstamp_running += delta;
  358. event->tstamp_stopped = ctx->time;
  359. }
  360. if (event->state != PERF_EVENT_STATE_ACTIVE)
  361. return;
  362. event->state = PERF_EVENT_STATE_INACTIVE;
  363. if (event->pending_disable) {
  364. event->pending_disable = 0;
  365. event->state = PERF_EVENT_STATE_OFF;
  366. }
  367. event->tstamp_stopped = ctx->time;
  368. event->pmu->del(event, 0);
  369. event->oncpu = -1;
  370. if (!is_software_event(event))
  371. cpuctx->active_oncpu--;
  372. ctx->nr_active--;
  373. if (event->attr.exclusive || !cpuctx->active_oncpu)
  374. cpuctx->exclusive = 0;
  375. }
  376. static void
  377. group_sched_out(struct perf_event *group_event,
  378. struct perf_cpu_context *cpuctx,
  379. struct perf_event_context *ctx)
  380. {
  381. struct perf_event *event;
  382. int state = group_event->state;
  383. event_sched_out(group_event, cpuctx, ctx);
  384. /*
  385. * Schedule out siblings (if any):
  386. */
  387. list_for_each_entry(event, &group_event->sibling_list, group_entry)
  388. event_sched_out(event, cpuctx, ctx);
  389. if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
  390. cpuctx->exclusive = 0;
  391. }
  392. static inline struct perf_cpu_context *
  393. __get_cpu_context(struct perf_event_context *ctx)
  394. {
  395. return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
  396. }
  397. /*
  398. * Cross CPU call to remove a performance event
  399. *
  400. * We disable the event on the hardware level first. After that we
  401. * remove it from the context list.
  402. */
  403. static void __perf_event_remove_from_context(void *info)
  404. {
  405. struct perf_event *event = info;
  406. struct perf_event_context *ctx = event->ctx;
  407. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  408. /*
  409. * If this is a task context, we need to check whether it is
  410. * the current task context of this cpu. If not it has been
  411. * scheduled out before the smp call arrived.
  412. */
  413. if (ctx->task && cpuctx->task_ctx != ctx)
  414. return;
  415. raw_spin_lock(&ctx->lock);
  416. event_sched_out(event, cpuctx, ctx);
  417. list_del_event(event, ctx);
  418. raw_spin_unlock(&ctx->lock);
  419. }
  420. /*
  421. * Remove the event from a task's (or a CPU's) list of events.
  422. *
  423. * Must be called with ctx->mutex held.
  424. *
  425. * CPU events are removed with a smp call. For task events we only
  426. * call when the task is on a CPU.
  427. *
  428. * If event->ctx is a cloned context, callers must make sure that
  429. * every task struct that event->ctx->task could possibly point to
  430. * remains valid. This is OK when called from perf_release since
  431. * that only calls us on the top-level context, which can't be a clone.
  432. * When called from perf_event_exit_task, it's OK because the
  433. * context has been detached from its task.
  434. */
  435. static void perf_event_remove_from_context(struct perf_event *event)
  436. {
  437. struct perf_event_context *ctx = event->ctx;
  438. struct task_struct *task = ctx->task;
  439. if (!task) {
  440. /*
  441. * Per cpu events are removed via an smp call and
  442. * the removal is always successful.
  443. */
  444. smp_call_function_single(event->cpu,
  445. __perf_event_remove_from_context,
  446. event, 1);
  447. return;
  448. }
  449. retry:
  450. task_oncpu_function_call(task, __perf_event_remove_from_context,
  451. event);
  452. raw_spin_lock_irq(&ctx->lock);
  453. /*
  454. * If the context is active we need to retry the smp call.
  455. */
  456. if (ctx->nr_active && !list_empty(&event->group_entry)) {
  457. raw_spin_unlock_irq(&ctx->lock);
  458. goto retry;
  459. }
  460. /*
  461. * The lock prevents that this context is scheduled in so we
  462. * can remove the event safely, if the call above did not
  463. * succeed.
  464. */
  465. if (!list_empty(&event->group_entry))
  466. list_del_event(event, ctx);
  467. raw_spin_unlock_irq(&ctx->lock);
  468. }
  469. /*
  470. * Cross CPU call to disable a performance event
  471. */
  472. static void __perf_event_disable(void *info)
  473. {
  474. struct perf_event *event = info;
  475. struct perf_event_context *ctx = event->ctx;
  476. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  477. /*
  478. * If this is a per-task event, need to check whether this
  479. * event's task is the current task on this cpu.
  480. */
  481. if (ctx->task && cpuctx->task_ctx != ctx)
  482. return;
  483. raw_spin_lock(&ctx->lock);
  484. /*
  485. * If the event is on, turn it off.
  486. * If it is in error state, leave it in error state.
  487. */
  488. if (event->state >= PERF_EVENT_STATE_INACTIVE) {
  489. update_context_time(ctx);
  490. update_group_times(event);
  491. if (event == event->group_leader)
  492. group_sched_out(event, cpuctx, ctx);
  493. else
  494. event_sched_out(event, cpuctx, ctx);
  495. event->state = PERF_EVENT_STATE_OFF;
  496. }
  497. raw_spin_unlock(&ctx->lock);
  498. }
  499. /*
  500. * Disable a event.
  501. *
  502. * If event->ctx is a cloned context, callers must make sure that
  503. * every task struct that event->ctx->task could possibly point to
  504. * remains valid. This condition is satisifed when called through
  505. * perf_event_for_each_child or perf_event_for_each because they
  506. * hold the top-level event's child_mutex, so any descendant that
  507. * goes to exit will block in sync_child_event.
  508. * When called from perf_pending_event it's OK because event->ctx
  509. * is the current context on this CPU and preemption is disabled,
  510. * hence we can't get into perf_event_task_sched_out for this context.
  511. */
  512. void perf_event_disable(struct perf_event *event)
  513. {
  514. struct perf_event_context *ctx = event->ctx;
  515. struct task_struct *task = ctx->task;
  516. if (!task) {
  517. /*
  518. * Disable the event on the cpu that it's on
  519. */
  520. smp_call_function_single(event->cpu, __perf_event_disable,
  521. event, 1);
  522. return;
  523. }
  524. retry:
  525. task_oncpu_function_call(task, __perf_event_disable, event);
  526. raw_spin_lock_irq(&ctx->lock);
  527. /*
  528. * If the event is still active, we need to retry the cross-call.
  529. */
  530. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  531. raw_spin_unlock_irq(&ctx->lock);
  532. goto retry;
  533. }
  534. /*
  535. * Since we have the lock this context can't be scheduled
  536. * in, so we can change the state safely.
  537. */
  538. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  539. update_group_times(event);
  540. event->state = PERF_EVENT_STATE_OFF;
  541. }
  542. raw_spin_unlock_irq(&ctx->lock);
  543. }
  544. static int
  545. event_sched_in(struct perf_event *event,
  546. struct perf_cpu_context *cpuctx,
  547. struct perf_event_context *ctx)
  548. {
  549. if (event->state <= PERF_EVENT_STATE_OFF)
  550. return 0;
  551. event->state = PERF_EVENT_STATE_ACTIVE;
  552. event->oncpu = smp_processor_id();
  553. /*
  554. * The new state must be visible before we turn it on in the hardware:
  555. */
  556. smp_wmb();
  557. if (event->pmu->add(event, PERF_EF_START)) {
  558. event->state = PERF_EVENT_STATE_INACTIVE;
  559. event->oncpu = -1;
  560. return -EAGAIN;
  561. }
  562. event->tstamp_running += ctx->time - event->tstamp_stopped;
  563. if (!is_software_event(event))
  564. cpuctx->active_oncpu++;
  565. ctx->nr_active++;
  566. if (event->attr.exclusive)
  567. cpuctx->exclusive = 1;
  568. return 0;
  569. }
  570. static int
  571. group_sched_in(struct perf_event *group_event,
  572. struct perf_cpu_context *cpuctx,
  573. struct perf_event_context *ctx)
  574. {
  575. struct perf_event *event, *partial_group = NULL;
  576. struct pmu *pmu = group_event->pmu;
  577. if (group_event->state == PERF_EVENT_STATE_OFF)
  578. return 0;
  579. pmu->start_txn(pmu);
  580. if (event_sched_in(group_event, cpuctx, ctx)) {
  581. pmu->cancel_txn(pmu);
  582. return -EAGAIN;
  583. }
  584. /*
  585. * Schedule in siblings as one group (if any):
  586. */
  587. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  588. if (event_sched_in(event, cpuctx, ctx)) {
  589. partial_group = event;
  590. goto group_error;
  591. }
  592. }
  593. if (!pmu->commit_txn(pmu))
  594. return 0;
  595. group_error:
  596. /*
  597. * Groups can be scheduled in as one unit only, so undo any
  598. * partial group before returning:
  599. */
  600. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  601. if (event == partial_group)
  602. break;
  603. event_sched_out(event, cpuctx, ctx);
  604. }
  605. event_sched_out(group_event, cpuctx, ctx);
  606. pmu->cancel_txn(pmu);
  607. return -EAGAIN;
  608. }
  609. /*
  610. * Work out whether we can put this event group on the CPU now.
  611. */
  612. static int group_can_go_on(struct perf_event *event,
  613. struct perf_cpu_context *cpuctx,
  614. int can_add_hw)
  615. {
  616. /*
  617. * Groups consisting entirely of software events can always go on.
  618. */
  619. if (event->group_flags & PERF_GROUP_SOFTWARE)
  620. return 1;
  621. /*
  622. * If an exclusive group is already on, no other hardware
  623. * events can go on.
  624. */
  625. if (cpuctx->exclusive)
  626. return 0;
  627. /*
  628. * If this group is exclusive and there are already
  629. * events on the CPU, it can't go on.
  630. */
  631. if (event->attr.exclusive && cpuctx->active_oncpu)
  632. return 0;
  633. /*
  634. * Otherwise, try to add it if all previous groups were able
  635. * to go on.
  636. */
  637. return can_add_hw;
  638. }
  639. static void add_event_to_ctx(struct perf_event *event,
  640. struct perf_event_context *ctx)
  641. {
  642. list_add_event(event, ctx);
  643. perf_group_attach(event);
  644. event->tstamp_enabled = ctx->time;
  645. event->tstamp_running = ctx->time;
  646. event->tstamp_stopped = ctx->time;
  647. }
  648. /*
  649. * Cross CPU call to install and enable a performance event
  650. *
  651. * Must be called with ctx->mutex held
  652. */
  653. static void __perf_install_in_context(void *info)
  654. {
  655. struct perf_event *event = info;
  656. struct perf_event_context *ctx = event->ctx;
  657. struct perf_event *leader = event->group_leader;
  658. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  659. int err;
  660. /*
  661. * If this is a task context, we need to check whether it is
  662. * the current task context of this cpu. If not it has been
  663. * scheduled out before the smp call arrived.
  664. * Or possibly this is the right context but it isn't
  665. * on this cpu because it had no events.
  666. */
  667. if (ctx->task && cpuctx->task_ctx != ctx) {
  668. if (cpuctx->task_ctx || ctx->task != current)
  669. return;
  670. cpuctx->task_ctx = ctx;
  671. }
  672. raw_spin_lock(&ctx->lock);
  673. ctx->is_active = 1;
  674. update_context_time(ctx);
  675. add_event_to_ctx(event, ctx);
  676. if (event->cpu != -1 && event->cpu != smp_processor_id())
  677. goto unlock;
  678. /*
  679. * Don't put the event on if it is disabled or if
  680. * it is in a group and the group isn't on.
  681. */
  682. if (event->state != PERF_EVENT_STATE_INACTIVE ||
  683. (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
  684. goto unlock;
  685. /*
  686. * An exclusive event can't go on if there are already active
  687. * hardware events, and no hardware event can go on if there
  688. * is already an exclusive event on.
  689. */
  690. if (!group_can_go_on(event, cpuctx, 1))
  691. err = -EEXIST;
  692. else
  693. err = event_sched_in(event, cpuctx, ctx);
  694. if (err) {
  695. /*
  696. * This event couldn't go on. If it is in a group
  697. * then we have to pull the whole group off.
  698. * If the event group is pinned then put it in error state.
  699. */
  700. if (leader != event)
  701. group_sched_out(leader, cpuctx, ctx);
  702. if (leader->attr.pinned) {
  703. update_group_times(leader);
  704. leader->state = PERF_EVENT_STATE_ERROR;
  705. }
  706. }
  707. unlock:
  708. raw_spin_unlock(&ctx->lock);
  709. }
  710. /*
  711. * Attach a performance event to a context
  712. *
  713. * First we add the event to the list with the hardware enable bit
  714. * in event->hw_config cleared.
  715. *
  716. * If the event is attached to a task which is on a CPU we use a smp
  717. * call to enable it in the task context. The task might have been
  718. * scheduled away, but we check this in the smp call again.
  719. *
  720. * Must be called with ctx->mutex held.
  721. */
  722. static void
  723. perf_install_in_context(struct perf_event_context *ctx,
  724. struct perf_event *event,
  725. int cpu)
  726. {
  727. struct task_struct *task = ctx->task;
  728. event->ctx = ctx;
  729. if (!task) {
  730. /*
  731. * Per cpu events are installed via an smp call and
  732. * the install is always successful.
  733. */
  734. smp_call_function_single(cpu, __perf_install_in_context,
  735. event, 1);
  736. return;
  737. }
  738. retry:
  739. task_oncpu_function_call(task, __perf_install_in_context,
  740. event);
  741. raw_spin_lock_irq(&ctx->lock);
  742. /*
  743. * we need to retry the smp call.
  744. */
  745. if (ctx->is_active && list_empty(&event->group_entry)) {
  746. raw_spin_unlock_irq(&ctx->lock);
  747. goto retry;
  748. }
  749. /*
  750. * The lock prevents that this context is scheduled in so we
  751. * can add the event safely, if it the call above did not
  752. * succeed.
  753. */
  754. if (list_empty(&event->group_entry))
  755. add_event_to_ctx(event, ctx);
  756. raw_spin_unlock_irq(&ctx->lock);
  757. }
  758. /*
  759. * Put a event into inactive state and update time fields.
  760. * Enabling the leader of a group effectively enables all
  761. * the group members that aren't explicitly disabled, so we
  762. * have to update their ->tstamp_enabled also.
  763. * Note: this works for group members as well as group leaders
  764. * since the non-leader members' sibling_lists will be empty.
  765. */
  766. static void __perf_event_mark_enabled(struct perf_event *event,
  767. struct perf_event_context *ctx)
  768. {
  769. struct perf_event *sub;
  770. event->state = PERF_EVENT_STATE_INACTIVE;
  771. event->tstamp_enabled = ctx->time - event->total_time_enabled;
  772. list_for_each_entry(sub, &event->sibling_list, group_entry) {
  773. if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
  774. sub->tstamp_enabled =
  775. ctx->time - sub->total_time_enabled;
  776. }
  777. }
  778. }
  779. /*
  780. * Cross CPU call to enable a performance event
  781. */
  782. static void __perf_event_enable(void *info)
  783. {
  784. struct perf_event *event = info;
  785. struct perf_event_context *ctx = event->ctx;
  786. struct perf_event *leader = event->group_leader;
  787. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  788. int err;
  789. /*
  790. * If this is a per-task event, need to check whether this
  791. * event's task is the current task on this cpu.
  792. */
  793. if (ctx->task && cpuctx->task_ctx != ctx) {
  794. if (cpuctx->task_ctx || ctx->task != current)
  795. return;
  796. cpuctx->task_ctx = ctx;
  797. }
  798. raw_spin_lock(&ctx->lock);
  799. ctx->is_active = 1;
  800. update_context_time(ctx);
  801. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  802. goto unlock;
  803. __perf_event_mark_enabled(event, ctx);
  804. if (event->cpu != -1 && event->cpu != smp_processor_id())
  805. goto unlock;
  806. /*
  807. * If the event is in a group and isn't the group leader,
  808. * then don't put it on unless the group is on.
  809. */
  810. if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
  811. goto unlock;
  812. if (!group_can_go_on(event, cpuctx, 1)) {
  813. err = -EEXIST;
  814. } else {
  815. if (event == leader)
  816. err = group_sched_in(event, cpuctx, ctx);
  817. else
  818. err = event_sched_in(event, cpuctx, ctx);
  819. }
  820. if (err) {
  821. /*
  822. * If this event can't go on and it's part of a
  823. * group, then the whole group has to come off.
  824. */
  825. if (leader != event)
  826. group_sched_out(leader, cpuctx, ctx);
  827. if (leader->attr.pinned) {
  828. update_group_times(leader);
  829. leader->state = PERF_EVENT_STATE_ERROR;
  830. }
  831. }
  832. unlock:
  833. raw_spin_unlock(&ctx->lock);
  834. }
  835. /*
  836. * Enable a event.
  837. *
  838. * If event->ctx is a cloned context, callers must make sure that
  839. * every task struct that event->ctx->task could possibly point to
  840. * remains valid. This condition is satisfied when called through
  841. * perf_event_for_each_child or perf_event_for_each as described
  842. * for perf_event_disable.
  843. */
  844. void perf_event_enable(struct perf_event *event)
  845. {
  846. struct perf_event_context *ctx = event->ctx;
  847. struct task_struct *task = ctx->task;
  848. if (!task) {
  849. /*
  850. * Enable the event on the cpu that it's on
  851. */
  852. smp_call_function_single(event->cpu, __perf_event_enable,
  853. event, 1);
  854. return;
  855. }
  856. raw_spin_lock_irq(&ctx->lock);
  857. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  858. goto out;
  859. /*
  860. * If the event is in error state, clear that first.
  861. * That way, if we see the event in error state below, we
  862. * know that it has gone back into error state, as distinct
  863. * from the task having been scheduled away before the
  864. * cross-call arrived.
  865. */
  866. if (event->state == PERF_EVENT_STATE_ERROR)
  867. event->state = PERF_EVENT_STATE_OFF;
  868. retry:
  869. raw_spin_unlock_irq(&ctx->lock);
  870. task_oncpu_function_call(task, __perf_event_enable, event);
  871. raw_spin_lock_irq(&ctx->lock);
  872. /*
  873. * If the context is active and the event is still off,
  874. * we need to retry the cross-call.
  875. */
  876. if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
  877. goto retry;
  878. /*
  879. * Since we have the lock this context can't be scheduled
  880. * in, so we can change the state safely.
  881. */
  882. if (event->state == PERF_EVENT_STATE_OFF)
  883. __perf_event_mark_enabled(event, ctx);
  884. out:
  885. raw_spin_unlock_irq(&ctx->lock);
  886. }
  887. static int perf_event_refresh(struct perf_event *event, int refresh)
  888. {
  889. /*
  890. * not supported on inherited events
  891. */
  892. if (event->attr.inherit)
  893. return -EINVAL;
  894. atomic_add(refresh, &event->event_limit);
  895. perf_event_enable(event);
  896. return 0;
  897. }
  898. enum event_type_t {
  899. EVENT_FLEXIBLE = 0x1,
  900. EVENT_PINNED = 0x2,
  901. EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
  902. };
  903. static void ctx_sched_out(struct perf_event_context *ctx,
  904. struct perf_cpu_context *cpuctx,
  905. enum event_type_t event_type)
  906. {
  907. struct perf_event *event;
  908. raw_spin_lock(&ctx->lock);
  909. perf_pmu_disable(ctx->pmu);
  910. ctx->is_active = 0;
  911. if (likely(!ctx->nr_events))
  912. goto out;
  913. update_context_time(ctx);
  914. if (!ctx->nr_active)
  915. goto out;
  916. if (event_type & EVENT_PINNED) {
  917. list_for_each_entry(event, &ctx->pinned_groups, group_entry)
  918. group_sched_out(event, cpuctx, ctx);
  919. }
  920. if (event_type & EVENT_FLEXIBLE) {
  921. list_for_each_entry(event, &ctx->flexible_groups, group_entry)
  922. group_sched_out(event, cpuctx, ctx);
  923. }
  924. out:
  925. perf_pmu_enable(ctx->pmu);
  926. raw_spin_unlock(&ctx->lock);
  927. }
  928. /*
  929. * Test whether two contexts are equivalent, i.e. whether they
  930. * have both been cloned from the same version of the same context
  931. * and they both have the same number of enabled events.
  932. * If the number of enabled events is the same, then the set
  933. * of enabled events should be the same, because these are both
  934. * inherited contexts, therefore we can't access individual events
  935. * in them directly with an fd; we can only enable/disable all
  936. * events via prctl, or enable/disable all events in a family
  937. * via ioctl, which will have the same effect on both contexts.
  938. */
  939. static int context_equiv(struct perf_event_context *ctx1,
  940. struct perf_event_context *ctx2)
  941. {
  942. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  943. && ctx1->parent_gen == ctx2->parent_gen
  944. && !ctx1->pin_count && !ctx2->pin_count;
  945. }
  946. static void __perf_event_sync_stat(struct perf_event *event,
  947. struct perf_event *next_event)
  948. {
  949. u64 value;
  950. if (!event->attr.inherit_stat)
  951. return;
  952. /*
  953. * Update the event value, we cannot use perf_event_read()
  954. * because we're in the middle of a context switch and have IRQs
  955. * disabled, which upsets smp_call_function_single(), however
  956. * we know the event must be on the current CPU, therefore we
  957. * don't need to use it.
  958. */
  959. switch (event->state) {
  960. case PERF_EVENT_STATE_ACTIVE:
  961. event->pmu->read(event);
  962. /* fall-through */
  963. case PERF_EVENT_STATE_INACTIVE:
  964. update_event_times(event);
  965. break;
  966. default:
  967. break;
  968. }
  969. /*
  970. * In order to keep per-task stats reliable we need to flip the event
  971. * values when we flip the contexts.
  972. */
  973. value = local64_read(&next_event->count);
  974. value = local64_xchg(&event->count, value);
  975. local64_set(&next_event->count, value);
  976. swap(event->total_time_enabled, next_event->total_time_enabled);
  977. swap(event->total_time_running, next_event->total_time_running);
  978. /*
  979. * Since we swizzled the values, update the user visible data too.
  980. */
  981. perf_event_update_userpage(event);
  982. perf_event_update_userpage(next_event);
  983. }
  984. #define list_next_entry(pos, member) \
  985. list_entry(pos->member.next, typeof(*pos), member)
  986. static void perf_event_sync_stat(struct perf_event_context *ctx,
  987. struct perf_event_context *next_ctx)
  988. {
  989. struct perf_event *event, *next_event;
  990. if (!ctx->nr_stat)
  991. return;
  992. update_context_time(ctx);
  993. event = list_first_entry(&ctx->event_list,
  994. struct perf_event, event_entry);
  995. next_event = list_first_entry(&next_ctx->event_list,
  996. struct perf_event, event_entry);
  997. while (&event->event_entry != &ctx->event_list &&
  998. &next_event->event_entry != &next_ctx->event_list) {
  999. __perf_event_sync_stat(event, next_event);
  1000. event = list_next_entry(event, event_entry);
  1001. next_event = list_next_entry(next_event, event_entry);
  1002. }
  1003. }
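
/*
 * Context-switch out @task's events for context number @ctxn.  If @task
 * and @next carry equivalent (cloned) contexts, simply swap the two
 * contexts rather than scheduling everything out and back in.
 */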
  1004. void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  1005. struct task_struct *next)
  1006. {
  1007. struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
  1008. struct perf_event_context *next_ctx;
  1009. struct perf_event_context *parent;
  1010. struct perf_cpu_context *cpuctx;
  1011. int do_switch = 1;
  1012. if (likely(!ctx))
  1013. return;
  1014. cpuctx = __get_cpu_context(ctx);
  1015. if (!cpuctx->task_ctx)
  1016. return;
  1017. rcu_read_lock();
  1018. parent = rcu_dereference(ctx->parent_ctx);
  1019. next_ctx = next->perf_event_ctxp[ctxn];
  1020. if (parent && next_ctx &&
  1021. rcu_dereference(next_ctx->parent_ctx) == parent) {
  1022. /*
  1023. * Looks like the two contexts are clones, so we might be
  1024. * able to optimize the context switch. We lock both
  1025. * contexts and check that they are clones under the
  1026. * lock (including re-checking that neither has been
  1027. * uncloned in the meantime). It doesn't matter which
  1028. * order we take the locks because no other cpu could
  1029. * be trying to lock both of these tasks.
  1030. */
  1031. raw_spin_lock(&ctx->lock);
  1032. raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  1033. if (context_equiv(ctx, next_ctx)) {
  1034. /*
  1035. * XXX do we need a memory barrier of sorts
  1036. * wrt to rcu_dereference() of perf_event_ctxp
  1037. */
  1038. task->perf_event_ctxp[ctxn] = next_ctx;
  1039. next->perf_event_ctxp[ctxn] = ctx;
  1040. ctx->task = next;
  1041. next_ctx->task = task;
  1042. do_switch = 0;
  1043. perf_event_sync_stat(ctx, next_ctx);
  1044. }
  1045. raw_spin_unlock(&next_ctx->lock);
  1046. raw_spin_unlock(&ctx->lock);
  1047. }
  1048. rcu_read_unlock();
  1049. if (do_switch) {
  1050. ctx_sched_out(ctx, cpuctx, EVENT_ALL);
  1051. cpuctx->task_ctx = NULL;
  1052. }
  1053. }
#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
			       struct task_struct *next)
{
	int ctxn;

	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);
}
  1075. static void task_ctx_sched_out(struct perf_event_context *ctx,
  1076. enum event_type_t event_type)
  1077. {
  1078. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  1079. if (!cpuctx->task_ctx)
  1080. return;
  1081. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  1082. return;
  1083. ctx_sched_out(ctx, cpuctx, event_type);
  1084. cpuctx->task_ctx = NULL;
  1085. }
  1086. /*
  1087. * Called with IRQs disabled
  1088. */
  1089. static void __perf_event_task_sched_out(struct perf_event_context *ctx)
  1090. {
  1091. task_ctx_sched_out(ctx, EVENT_ALL);
  1092. }
  1093. /*
  1094. * Called with IRQs disabled
  1095. */
  1096. static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  1097. enum event_type_t event_type)
  1098. {
  1099. ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
  1100. }
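
/*
 * Schedule in every pinned group that is allowed on this CPU; a pinned
 * group that cannot get onto the PMU is moved to ERROR state.
 */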
  1101. static void
  1102. ctx_pinned_sched_in(struct perf_event_context *ctx,
  1103. struct perf_cpu_context *cpuctx)
  1104. {
  1105. struct perf_event *event;
  1106. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  1107. if (event->state <= PERF_EVENT_STATE_OFF)
  1108. continue;
  1109. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1110. continue;
  1111. if (group_can_go_on(event, cpuctx, 1))
  1112. group_sched_in(event, cpuctx, ctx);
  1113. /*
  1114. * If this pinned group hasn't been scheduled,
  1115. * put it in error state.
  1116. */
  1117. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1118. update_group_times(event);
  1119. event->state = PERF_EVENT_STATE_ERROR;
  1120. }
  1121. }
  1122. }
  1123. static void
  1124. ctx_flexible_sched_in(struct perf_event_context *ctx,
  1125. struct perf_cpu_context *cpuctx)
  1126. {
  1127. struct perf_event *event;
  1128. int can_add_hw = 1;
  1129. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  1130. /* Ignore events in OFF or ERROR state */
  1131. if (event->state <= PERF_EVENT_STATE_OFF)
  1132. continue;
  1133. /*
  1134. * Listen to the 'cpu' scheduling filter constraint
  1135. * of events:
  1136. */
  1137. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1138. continue;
  1139. if (group_can_go_on(event, cpuctx, can_add_hw)) {
  1140. if (group_sched_in(event, cpuctx, ctx))
  1141. can_add_hw = 0;
  1142. }
  1143. }
  1144. }
  1145. static void
  1146. ctx_sched_in(struct perf_event_context *ctx,
  1147. struct perf_cpu_context *cpuctx,
  1148. enum event_type_t event_type)
  1149. {
  1150. raw_spin_lock(&ctx->lock);
  1151. ctx->is_active = 1;
  1152. if (likely(!ctx->nr_events))
  1153. goto out;
  1154. ctx->timestamp = perf_clock();
  1155. /*
  1156. * First go through the list and put on any pinned groups
  1157. * in order to give them the best chance of going on.
  1158. */
  1159. if (event_type & EVENT_PINNED)
  1160. ctx_pinned_sched_in(ctx, cpuctx);
  1161. /* Then walk through the lower prio flexible groups */
  1162. if (event_type & EVENT_FLEXIBLE)
  1163. ctx_flexible_sched_in(ctx, cpuctx);
  1164. out:
  1165. raw_spin_unlock(&ctx->lock);
  1166. }
  1167. static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
  1168. enum event_type_t event_type)
  1169. {
  1170. struct perf_event_context *ctx = &cpuctx->ctx;
  1171. ctx_sched_in(ctx, cpuctx, event_type);
  1172. }
  1173. static void task_ctx_sched_in(struct perf_event_context *ctx,
  1174. enum event_type_t event_type)
  1175. {
  1176. struct perf_cpu_context *cpuctx;
  1177. cpuctx = __get_cpu_context(ctx);
  1178. if (cpuctx->task_ctx == ctx)
  1179. return;
  1180. ctx_sched_in(ctx, cpuctx, event_type);
  1181. cpuctx->task_ctx = ctx;
  1182. }
  1183. void perf_event_context_sched_in(struct perf_event_context *ctx)
  1184. {
  1185. struct perf_cpu_context *cpuctx;
  1186. cpuctx = __get_cpu_context(ctx);
  1187. if (cpuctx->task_ctx == ctx)
  1188. return;
  1189. perf_pmu_disable(ctx->pmu);
  1190. /*
  1191. * We want to keep the following priority order:
  1192. * cpu pinned (that don't need to move), task pinned,
  1193. * cpu flexible, task flexible.
  1194. */
  1195. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1196. ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
  1197. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
  1198. ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
  1199. cpuctx->task_ctx = ctx;
  1200. /*
  1201. * Since these rotations are per-cpu, we need to ensure the
  1202. * cpu-context we got scheduled on is actually rotating.
  1203. */
  1204. perf_pmu_rotate_start(ctx->pmu);
  1205. perf_pmu_enable(ctx->pmu);
  1206. }
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx);
	}
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */
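	/*
	 * Illustrative example (numbers not taken from real hardware):
	 * 10,000,000 events counted over 10,000,000 ns at
	 * sample_freq = 1000 Hz gives
	 *
	 *          10^7 * 10^9
	 * period = ----------- = 10^6 events per sample,
	 *          10^7 * 10^3
	 *
	 * i.e. at ~10^9 events/sec we take ~1000 samples/sec as requested.
	 */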
	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;
		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}
		dividend = count * sec;
	} else {
		dividend = count * sec;
		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}
		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
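
/*
 * Nudge hwc->sample_period towards the period that perf_calculate_period()
 * suggests for the observed event rate.  Only 1/8th of the delta is applied
 * per invocation (a simple low-pass filter), and the event is restarted if
 * the pending period_left has drifted far beyond the new period.
 */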
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}
  1309. static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
  1310. {
  1311. struct perf_event *event;
  1312. struct hw_perf_event *hwc;
  1313. u64 interrupts, now;
  1314. s64 delta;
  1315. raw_spin_lock(&ctx->lock);
  1316. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  1317. if (event->state != PERF_EVENT_STATE_ACTIVE)
  1318. continue;
  1319. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1320. continue;
  1321. hwc = &event->hw;
  1322. interrupts = hwc->interrupts;
  1323. hwc->interrupts = 0;
  1324. /*
  1325. * unthrottle events on the tick
  1326. */
  1327. if (interrupts == MAX_INTERRUPTS) {
  1328. perf_log_throttle(event, 1);
  1329. event->pmu->start(event, 0);
  1330. }
  1331. if (!event->attr.freq || !event->attr.sample_freq)
  1332. continue;
  1333. event->pmu->read(event);
  1334. now = local64_read(&event->count);
  1335. delta = now - hwc->freq_count_stamp;
  1336. hwc->freq_count_stamp = now;
  1337. if (delta > 0)
  1338. perf_adjust_period(event, period, delta);
  1339. }
  1340. raw_spin_unlock(&ctx->lock);
  1341. }
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/* Rotate the first entry last of non-pinned groups */
	list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}

/*
 * Cannot race with ->pmu_rotate_start() because this is run from hardirq
 * context, and ->pmu_rotate_start() is called with irqs disabled (both are
 * cpu affine, so there are no SMP races).
 */
  1357. static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
  1358. {
  1359. enum hrtimer_restart restart = HRTIMER_NORESTART;
  1360. struct perf_cpu_context *cpuctx;
  1361. struct perf_event_context *ctx = NULL;
  1362. int rotate = 0;
  1363. cpuctx = container_of(timer, struct perf_cpu_context, timer);
  1364. if (cpuctx->ctx.nr_events) {
  1365. restart = HRTIMER_RESTART;
  1366. if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
  1367. rotate = 1;
  1368. }
  1369. ctx = cpuctx->task_ctx;
  1370. if (ctx && ctx->nr_events) {
  1371. restart = HRTIMER_RESTART;
  1372. if (ctx->nr_events != ctx->nr_active)
  1373. rotate = 1;
  1374. }
  1375. perf_pmu_disable(cpuctx->ctx.pmu);
  1376. perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
  1377. if (ctx)
  1378. perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
  1379. if (!rotate)
  1380. goto done;
  1381. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1382. if (ctx)
  1383. task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
  1384. rotate_ctx(&cpuctx->ctx);
  1385. if (ctx)
  1386. rotate_ctx(ctx);
  1387. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
  1388. if (ctx)
  1389. task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
  1390. done:
  1391. perf_pmu_enable(cpuctx->ctx.pmu);
  1392. hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
  1393. return restart;
  1394. }
  1395. static int event_enable_on_exec(struct perf_event *event,
  1396. struct perf_event_context *ctx)
  1397. {
  1398. if (!event->attr.enable_on_exec)
  1399. return 0;
  1400. event->attr.enable_on_exec = 0;
  1401. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  1402. return 0;
  1403. __perf_event_mark_enabled(event, ctx);
  1404. return 1;
  1405. }
  1406. /*
  1407. * Enable all of a task's events that have been marked enable-on-exec.
  1408. * This expects task == current.
  1409. */
  1410. static void perf_event_enable_on_exec(struct perf_event_context *ctx)
  1411. {
  1412. struct perf_event *event;
  1413. unsigned long flags;
  1414. int enabled = 0;
  1415. int ret;
  1416. local_irq_save(flags);
  1417. if (!ctx || !ctx->nr_events)
  1418. goto out;
  1419. task_ctx_sched_out(ctx, EVENT_ALL);
  1420. raw_spin_lock(&ctx->lock);
  1421. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  1422. ret = event_enable_on_exec(event, ctx);
  1423. if (ret)
  1424. enabled = 1;
  1425. }
  1426. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  1427. ret = event_enable_on_exec(event, ctx);
  1428. if (ret)
  1429. enabled = 1;
  1430. }
  1431. /*
  1432. * Unclone this context if we enabled any event.
  1433. */
  1434. if (enabled)
  1435. unclone_ctx(ctx);
  1436. raw_spin_unlock(&ctx->lock);
  1437. perf_event_context_sched_in(ctx);
  1438. out:
  1439. local_irq_restore(flags);
  1440. }
  1441. /*
  1442. * Cross CPU call to read the hardware event
  1443. */
  1444. static void __perf_event_read(void *info)
  1445. {
  1446. struct perf_event *event = info;
  1447. struct perf_event_context *ctx = event->ctx;
  1448. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  1449. /*
  1450. * If this is a task context, we need to check whether it is
  1451. * the current task context of this cpu. If not it has been
  1452. * scheduled out before the smp call arrived. In that case
  1453. * event->count would have been updated to a recent sample
  1454. * when the event was scheduled out.
  1455. */
  1456. if (ctx->task && cpuctx->task_ctx != ctx)
  1457. return;
  1458. raw_spin_lock(&ctx->lock);
  1459. update_context_time(ctx);
  1460. update_event_times(event);
  1461. raw_spin_unlock(&ctx->lock);
  1462. event->pmu->read(event);
  1463. }
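
/*
 * The observable count is the value accumulated in this event plus the
 * counts folded in from its exited children.
 */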
  1464. static inline u64 perf_event_count(struct perf_event *event)
  1465. {
  1466. return local64_read(&event->count) + atomic64_read(&event->child_count);
  1467. }
  1468. static u64 perf_event_read(struct perf_event *event)
  1469. {
  1470. /*
  1471. * If event is enabled and currently active on a CPU, update the
  1472. * value in the event structure:
  1473. */
  1474. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  1475. smp_call_function_single(event->oncpu,
  1476. __perf_event_read, event, 1);
  1477. } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1478. struct perf_event_context *ctx = event->ctx;
  1479. unsigned long flags;
  1480. raw_spin_lock_irqsave(&ctx->lock, flags);
  1481. update_context_time(ctx);
  1482. update_event_times(event);
  1483. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  1484. }
  1485. return perf_event_count(event);
  1486. }
  1487. /*
  1488. * Callchain support
  1489. */
  1490. struct callchain_cpus_entries {
  1491. struct rcu_head rcu_head;
  1492. struct perf_callchain_entry *cpu_entries[0];
  1493. };
  1494. static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
  1495. static atomic_t nr_callchain_events;
  1496. static DEFINE_MUTEX(callchain_mutex);
  1497. struct callchain_cpus_entries *callchain_cpus_entries;
  1498. __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
  1499. struct pt_regs *regs)
  1500. {
  1501. }
  1502. __weak void perf_callchain_user(struct perf_callchain_entry *entry,
  1503. struct pt_regs *regs)
  1504. {
  1505. }
  1506. static void release_callchain_buffers_rcu(struct rcu_head *head)
  1507. {
  1508. struct callchain_cpus_entries *entries;
  1509. int cpu;
  1510. entries = container_of(head, struct callchain_cpus_entries, rcu_head);
  1511. for_each_possible_cpu(cpu)
  1512. kfree(entries->cpu_entries[cpu]);
  1513. kfree(entries);
  1514. }
  1515. static void release_callchain_buffers(void)
  1516. {
  1517. struct callchain_cpus_entries *entries;
  1518. entries = callchain_cpus_entries;
  1519. rcu_assign_pointer(callchain_cpus_entries, NULL);
  1520. call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
  1521. }
  1522. static int alloc_callchain_buffers(void)
  1523. {
  1524. int cpu;
  1525. int size;
  1526. struct callchain_cpus_entries *entries;
  1527. /*
  1528. * We can't use the percpu allocation API for data that can be
  1529. * accessed from NMI. Use a temporary manual per cpu allocation
  1530. * until that gets sorted out.
  1531. */
  1532. size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
  1533. num_possible_cpus();
  1534. entries = kzalloc(size, GFP_KERNEL);
  1535. if (!entries)
  1536. return -ENOMEM;
  1537. size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
  1538. for_each_possible_cpu(cpu) {
  1539. entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
  1540. cpu_to_node(cpu));
  1541. if (!entries->cpu_entries[cpu])
  1542. goto fail;
  1543. }
  1544. rcu_assign_pointer(callchain_cpus_entries, entries);
  1545. return 0;
  1546. fail:
  1547. for_each_possible_cpu(cpu)
  1548. kfree(entries->cpu_entries[cpu]);
  1549. kfree(entries);
  1550. return -ENOMEM;
  1551. }
  1552. static int get_callchain_buffers(void)
  1553. {
  1554. int err = 0;
  1555. int count;
  1556. mutex_lock(&callchain_mutex);
  1557. count = atomic_inc_return(&nr_callchain_events);
  1558. if (WARN_ON_ONCE(count < 1)) {
  1559. err = -EINVAL;
  1560. goto exit;
  1561. }
  1562. if (count > 1) {
  1563. /* If the allocation failed, give up */
  1564. if (!callchain_cpus_entries)
  1565. err = -ENOMEM;
  1566. goto exit;
  1567. }
  1568. err = alloc_callchain_buffers();
  1569. if (err)
  1570. release_callchain_buffers();
  1571. exit:
  1572. mutex_unlock(&callchain_mutex);
  1573. return err;
  1574. }
  1575. static void put_callchain_buffers(void)
  1576. {
  1577. if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
  1578. release_callchain_buffers();
  1579. mutex_unlock(&callchain_mutex);
  1580. }
  1581. }
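
/*
 * Callchain recursion protection: one counter per context level
 * (task, softirq, hardirq, NMI), so a callchain being captured at one
 * level cannot be re-entered from the same level.
 */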
  1582. static int get_recursion_context(int *recursion)
  1583. {
  1584. int rctx;
  1585. if (in_nmi())
  1586. rctx = 3;
  1587. else if (in_irq())
  1588. rctx = 2;
  1589. else if (in_softirq())
  1590. rctx = 1;
  1591. else
  1592. rctx = 0;
  1593. if (recursion[rctx])
  1594. return -1;
  1595. recursion[rctx]++;
  1596. barrier();
  1597. return rctx;
  1598. }
  1599. static inline void put_recursion_context(int *recursion, int rctx)
  1600. {
  1601. barrier();
  1602. recursion[rctx]--;
  1603. }
  1604. static struct perf_callchain_entry *get_callchain_entry(int *rctx)
  1605. {
  1606. int cpu;
  1607. struct callchain_cpus_entries *entries;
  1608. *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
  1609. if (*rctx == -1)
  1610. return NULL;
  1611. entries = rcu_dereference(callchain_cpus_entries);
  1612. if (!entries)
  1613. return NULL;
  1614. cpu = smp_processor_id();
  1615. return &entries->cpu_entries[cpu][*rctx];
  1616. }
  1617. static void
  1618. put_callchain_entry(int rctx)
  1619. {
  1620. put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
  1621. }
  1622. static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  1623. {
  1624. int rctx;
  1625. struct perf_callchain_entry *entry;
  1626. entry = get_callchain_entry(&rctx);
  1627. if (rctx == -1)
  1628. return NULL;
  1629. if (!entry)
  1630. goto exit_put;
  1631. entry->nr = 0;
  1632. if (!user_mode(regs)) {
  1633. perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
  1634. perf_callchain_kernel(entry, regs);
  1635. if (current->mm)
  1636. regs = task_pt_regs(current);
  1637. else
  1638. regs = NULL;
  1639. }
  1640. if (regs) {
  1641. perf_callchain_store(entry, PERF_CONTEXT_USER);
  1642. perf_callchain_user(entry, regs);
  1643. }
  1644. exit_put:
  1645. put_callchain_entry(rctx);
  1646. return entry;
  1647. }
  1648. /*
  1649. * Initialize the perf_event context in a task_struct:
  1650. */
  1651. static void __perf_event_init_context(struct perf_event_context *ctx)
  1652. {
  1653. raw_spin_lock_init(&ctx->lock);
  1654. mutex_init(&ctx->mutex);
  1655. INIT_LIST_HEAD(&ctx->pinned_groups);
  1656. INIT_LIST_HEAD(&ctx->flexible_groups);
  1657. INIT_LIST_HEAD(&ctx->event_list);
  1658. atomic_set(&ctx->refcount, 1);
  1659. }
  1660. static struct perf_event_context *
  1661. alloc_perf_context(struct pmu *pmu, struct task_struct *task)
  1662. {
  1663. struct perf_event_context *ctx;
  1664. ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
  1665. if (!ctx)
  1666. return NULL;
  1667. __perf_event_init_context(ctx);
  1668. if (task) {
  1669. ctx->task = task;
  1670. get_task_struct(task);
  1671. }
  1672. ctx->pmu = pmu;
  1673. return ctx;
  1674. }
  1675. static struct task_struct *
  1676. find_lively_task_by_vpid(pid_t vpid)
  1677. {
  1678. struct task_struct *task;
  1679. int err;
  1680. rcu_read_lock();
  1681. if (!vpid)
  1682. task = current;
  1683. else
  1684. task = find_task_by_vpid(vpid);
  1685. if (task)
  1686. get_task_struct(task);
  1687. rcu_read_unlock();
  1688. if (!task)
  1689. return ERR_PTR(-ESRCH);
  1690. /*
  1691. * Can't attach events to a dying task.
  1692. */
  1693. err = -ESRCH;
  1694. if (task->flags & PF_EXITING)
  1695. goto errout;
  1696. /* Reuse ptrace permission checks for now. */
  1697. err = -EACCES;
  1698. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  1699. goto errout;
  1700. return task;
  1701. errout:
  1702. put_task_struct(task);
  1703. return ERR_PTR(err);
  1704. }
  1705. static struct perf_event_context *
  1706. find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
  1707. {
  1708. struct perf_event_context *ctx;
  1709. struct perf_cpu_context *cpuctx;
  1710. unsigned long flags;
  1711. int ctxn, err;
  1712. if (!task && cpu != -1) {
  1713. /* Must be root to operate on a CPU event: */
  1714. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1715. return ERR_PTR(-EACCES);
  1716. if (cpu < 0 || cpu >= nr_cpumask_bits)
  1717. return ERR_PTR(-EINVAL);
	/*
	 * We could be clever and allow attaching an event to an
	 * offline CPU and activate it when the CPU comes up, but
	 * that's for later.
	 */
  1723. if (!cpu_online(cpu))
  1724. return ERR_PTR(-ENODEV);
  1725. cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  1726. ctx = &cpuctx->ctx;
  1727. get_ctx(ctx);
  1728. return ctx;
  1729. }
  1730. err = -EINVAL;
  1731. ctxn = pmu->task_ctx_nr;
  1732. if (ctxn < 0)
  1733. goto errout;
  1734. retry:
  1735. ctx = perf_lock_task_context(task, ctxn, &flags);
  1736. if (ctx) {
  1737. unclone_ctx(ctx);
  1738. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  1739. }
  1740. if (!ctx) {
  1741. ctx = alloc_perf_context(pmu, task);
  1742. err = -ENOMEM;
  1743. if (!ctx)
  1744. goto errout;
  1745. get_ctx(ctx);
  1746. if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
  1747. /*
  1748. * We raced with some other task; use
  1749. * the context they set.
  1750. */
  1751. put_task_struct(task);
  1752. kfree(ctx);
  1753. goto retry;
  1754. }
  1755. }
  1756. put_task_struct(task);
  1757. return ctx;
  1758. errout:
  1759. put_task_struct(task);
  1760. return ERR_PTR(err);
  1761. }
  1762. static void perf_event_free_filter(struct perf_event *event);
  1763. static void free_event_rcu(struct rcu_head *head)
  1764. {
  1765. struct perf_event *event;
  1766. event = container_of(head, struct perf_event, rcu_head);
  1767. if (event->ns)
  1768. put_pid_ns(event->ns);
  1769. perf_event_free_filter(event);
  1770. kfree(event);
  1771. }
  1772. static void perf_pending_sync(struct perf_event *event);
  1773. static void perf_buffer_put(struct perf_buffer *buffer);
  1774. static void free_event(struct perf_event *event)
  1775. {
  1776. perf_pending_sync(event);
  1777. if (!event->parent) {
  1778. atomic_dec(&nr_events);
  1779. if (event->attr.mmap || event->attr.mmap_data)
  1780. atomic_dec(&nr_mmap_events);
  1781. if (event->attr.comm)
  1782. atomic_dec(&nr_comm_events);
  1783. if (event->attr.task)
  1784. atomic_dec(&nr_task_events);
  1785. if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
  1786. put_callchain_buffers();
  1787. }
  1788. if (event->buffer) {
  1789. perf_buffer_put(event->buffer);
  1790. event->buffer = NULL;
  1791. }
  1792. if (event->destroy)
  1793. event->destroy(event);
  1794. if (event->ctx)
  1795. put_ctx(event->ctx);
  1796. call_rcu(&event->rcu_head, free_event_rcu);
  1797. }
  1798. int perf_event_release_kernel(struct perf_event *event)
  1799. {
  1800. struct perf_event_context *ctx = event->ctx;
  1801. /*
  1802. * Remove from the PMU, can't get re-enabled since we got
  1803. * here because the last ref went.
  1804. */
  1805. perf_event_disable(event);
  1806. WARN_ON_ONCE(ctx->parent_ctx);
  1807. /*
  1808. * There are two ways this annotation is useful:
  1809. *
  1810. * 1) there is a lock recursion from perf_event_exit_task
  1811. * see the comment there.
  1812. *
  1813. * 2) there is a lock-inversion with mmap_sem through
  1814. * perf_event_read_group(), which takes faults while
  1815. * holding ctx->mutex, however this is called after
  1816. * the last filedesc died, so there is no possibility
  1817. * to trigger the AB-BA case.
  1818. */
  1819. mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
  1820. raw_spin_lock_irq(&ctx->lock);
  1821. perf_group_detach(event);
  1822. list_del_event(event, ctx);
  1823. raw_spin_unlock_irq(&ctx->lock);
  1824. mutex_unlock(&ctx->mutex);
  1825. mutex_lock(&event->owner->perf_event_mutex);
  1826. list_del_init(&event->owner_entry);
  1827. mutex_unlock(&event->owner->perf_event_mutex);
  1828. put_task_struct(event->owner);
  1829. free_event(event);
  1830. return 0;
  1831. }
  1832. EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  1833. /*
  1834. * Called when the last reference to the file is gone.
  1835. */
  1836. static int perf_release(struct inode *inode, struct file *file)
  1837. {
  1838. struct perf_event *event = file->private_data;
  1839. file->private_data = NULL;
  1840. return perf_event_release_kernel(event);
  1841. }
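
/*
 * Number of bytes a read() of this event will produce, given its
 * read_format (value, times, IDs and, for groups, all siblings).
 */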
  1842. static int perf_event_read_size(struct perf_event *event)
  1843. {
  1844. int entry = sizeof(u64); /* value */
  1845. int size = 0;
  1846. int nr = 1;
  1847. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1848. size += sizeof(u64);
  1849. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1850. size += sizeof(u64);
  1851. if (event->attr.read_format & PERF_FORMAT_ID)
  1852. entry += sizeof(u64);
  1853. if (event->attr.read_format & PERF_FORMAT_GROUP) {
  1854. nr += event->group_leader->nr_siblings;
  1855. size += sizeof(u64);
  1856. }
  1857. size += entry * nr;
  1858. return size;
  1859. }
  1860. u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
  1861. {
  1862. struct perf_event *child;
  1863. u64 total = 0;
  1864. *enabled = 0;
  1865. *running = 0;
  1866. mutex_lock(&event->child_mutex);
  1867. total += perf_event_read(event);
  1868. *enabled += event->total_time_enabled +
  1869. atomic64_read(&event->child_total_time_enabled);
  1870. *running += event->total_time_running +
  1871. atomic64_read(&event->child_total_time_running);
  1872. list_for_each_entry(child, &event->child_list, child_list) {
  1873. total += perf_event_read(child);
  1874. *enabled += child->total_time_enabled;
  1875. *running += child->total_time_running;
  1876. }
  1877. mutex_unlock(&event->child_mutex);
  1878. return total;
  1879. }
  1880. EXPORT_SYMBOL_GPL(perf_event_read_value);
  1881. static int perf_event_read_group(struct perf_event *event,
  1882. u64 read_format, char __user *buf)
  1883. {
  1884. struct perf_event *leader = event->group_leader, *sub;
  1885. int n = 0, size = 0, ret = -EFAULT;
  1886. struct perf_event_context *ctx = leader->ctx;
  1887. u64 values[5];
  1888. u64 count, enabled, running;
  1889. mutex_lock(&ctx->mutex);
  1890. count = perf_event_read_value(leader, &enabled, &running);
  1891. values[n++] = 1 + leader->nr_siblings;
  1892. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1893. values[n++] = enabled;
  1894. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1895. values[n++] = running;
  1896. values[n++] = count;
  1897. if (read_format & PERF_FORMAT_ID)
  1898. values[n++] = primary_event_id(leader);
  1899. size = n * sizeof(u64);
  1900. if (copy_to_user(buf, values, size))
  1901. goto unlock;
  1902. ret = size;
  1903. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  1904. n = 0;
  1905. values[n++] = perf_event_read_value(sub, &enabled, &running);
  1906. if (read_format & PERF_FORMAT_ID)
  1907. values[n++] = primary_event_id(sub);
  1908. size = n * sizeof(u64);
  1909. if (copy_to_user(buf + ret, values, size)) {
  1910. ret = -EFAULT;
  1911. goto unlock;
  1912. }
  1913. ret += size;
  1914. }
  1915. unlock:
  1916. mutex_unlock(&ctx->mutex);
  1917. return ret;
  1918. }
  1919. static int perf_event_read_one(struct perf_event *event,
  1920. u64 read_format, char __user *buf)
  1921. {
  1922. u64 enabled, running;
  1923. u64 values[4];
  1924. int n = 0;
  1925. values[n++] = perf_event_read_value(event, &enabled, &running);
  1926. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1927. values[n++] = enabled;
  1928. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1929. values[n++] = running;
  1930. if (read_format & PERF_FORMAT_ID)
  1931. values[n++] = primary_event_id(event);
  1932. if (copy_to_user(buf, values, n * sizeof(u64)))
  1933. return -EFAULT;
  1934. return n * sizeof(u64);
  1935. }
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < perf_event_read_size(event))
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}
  1960. static ssize_t
  1961. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  1962. {
  1963. struct perf_event *event = file->private_data;
  1964. return perf_read_hw(event, buf, count);
  1965. }
  1966. static unsigned int perf_poll(struct file *file, poll_table *wait)
  1967. {
  1968. struct perf_event *event = file->private_data;
  1969. struct perf_buffer *buffer;
  1970. unsigned int events = POLL_HUP;
  1971. rcu_read_lock();
  1972. buffer = rcu_dereference(event->buffer);
  1973. if (buffer)
  1974. events = atomic_xchg(&buffer->poll, 0);
  1975. rcu_read_unlock();
  1976. poll_wait(file, &event->waitq, wait);
  1977. return events;
  1978. }
  1979. static void perf_event_reset(struct perf_event *event)
  1980. {
  1981. (void)perf_event_read(event);
  1982. local64_set(&event->count, 0);
  1983. perf_event_update_userpage(event);
  1984. }
  1985. /*
  1986. * Holding the top-level event's child_mutex means that any
  1987. * descendant process that has inherited this event will block
  1988. * in sync_child_event if it goes to exit, thus satisfying the
  1989. * task existence requirements of perf_event_enable/disable.
  1990. */
  1991. static void perf_event_for_each_child(struct perf_event *event,
  1992. void (*func)(struct perf_event *))
  1993. {
  1994. struct perf_event *child;
  1995. WARN_ON_ONCE(event->ctx->parent_ctx);
  1996. mutex_lock(&event->child_mutex);
  1997. func(event);
  1998. list_for_each_entry(child, &event->child_list, child_list)
  1999. func(child);
  2000. mutex_unlock(&event->child_mutex);
  2001. }
static void perf_event_for_each(struct perf_event *event,
				void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!event->attr.sample_period)
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
  2044. static const struct file_operations perf_fops;
  2045. static struct perf_event *perf_fget_light(int fd, int *fput_needed)
  2046. {
  2047. struct file *file;
  2048. file = fget_light(fd, fput_needed);
  2049. if (!file)
  2050. return ERR_PTR(-EBADF);
  2051. if (file->f_op != &perf_fops) {
  2052. fput_light(file, *fput_needed);
  2053. *fput_needed = 0;
  2054. return ERR_PTR(-EBADF);
  2055. }
  2056. return file->private_data;
  2057. }
  2058. static int perf_event_set_output(struct perf_event *event,
  2059. struct perf_event *output_event);
  2060. static int perf_event_set_filter(struct perf_event *event, void __user *arg);
  2061. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  2062. {
  2063. struct perf_event *event = file->private_data;
  2064. void (*func)(struct perf_event *);
  2065. u32 flags = arg;
  2066. switch (cmd) {
  2067. case PERF_EVENT_IOC_ENABLE:
  2068. func = perf_event_enable;
  2069. break;
  2070. case PERF_EVENT_IOC_DISABLE:
  2071. func = perf_event_disable;
  2072. break;
  2073. case PERF_EVENT_IOC_RESET:
  2074. func = perf_event_reset;
  2075. break;
  2076. case PERF_EVENT_IOC_REFRESH:
  2077. return perf_event_refresh(event, arg);
  2078. case PERF_EVENT_IOC_PERIOD:
  2079. return perf_event_period(event, (u64 __user *)arg);
  2080. case PERF_EVENT_IOC_SET_OUTPUT:
  2081. {
  2082. struct perf_event *output_event = NULL;
  2083. int fput_needed = 0;
  2084. int ret;
  2085. if (arg != -1) {
  2086. output_event = perf_fget_light(arg, &fput_needed);
  2087. if (IS_ERR(output_event))
  2088. return PTR_ERR(output_event);
  2089. }
  2090. ret = perf_event_set_output(event, output_event);
  2091. if (output_event)
  2092. fput_light(output_event->filp, fput_needed);
  2093. return ret;
  2094. }
  2095. case PERF_EVENT_IOC_SET_FILTER:
  2096. return perf_event_set_filter(event, (void __user *)arg);
  2097. default:
  2098. return -ENOTTY;
  2099. }
  2100. if (flags & PERF_IOC_FLAG_GROUP)
  2101. perf_event_for_each(event, func);
  2102. else
  2103. perf_event_for_each_child(event, func);
  2104. return 0;
  2105. }
  2106. int perf_event_task_enable(void)
  2107. {
  2108. struct perf_event *event;
  2109. mutex_lock(&current->perf_event_mutex);
  2110. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  2111. perf_event_for_each_child(event, perf_event_enable);
  2112. mutex_unlock(&current->perf_event_mutex);
  2113. return 0;
  2114. }
  2115. int perf_event_task_disable(void)
  2116. {
  2117. struct perf_event *event;
  2118. mutex_lock(&current->perf_event_mutex);
  2119. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  2120. perf_event_for_each_child(event, perf_event_disable);
  2121. mutex_unlock(&current->perf_event_mutex);
  2122. return 0;
  2123. }
  2124. #ifndef PERF_EVENT_INDEX_OFFSET
  2125. # define PERF_EVENT_INDEX_OFFSET 0
  2126. #endif
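
/*
 * Index reported to user space in the mmap control page; 0 means the
 * event is not currently loaded on the PMU.
 */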
  2127. static int perf_event_index(struct perf_event *event)
  2128. {
  2129. if (event->hw.state & PERF_HES_STOPPED)
  2130. return 0;
  2131. if (event->state != PERF_EVENT_STATE_ACTIVE)
  2132. return 0;
  2133. return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
  2134. }
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
  2140. void perf_event_update_userpage(struct perf_event *event)
  2141. {
  2142. struct perf_event_mmap_page *userpg;
  2143. struct perf_buffer *buffer;
  2144. rcu_read_lock();
  2145. buffer = rcu_dereference(event->buffer);
  2146. if (!buffer)
  2147. goto unlock;
  2148. userpg = buffer->user_page;
  2149. /*
  2150. * Disable preemption so as to not let the corresponding user-space
  2151. * spin too long if we get preempted.
  2152. */
  2153. preempt_disable();
  2154. ++userpg->lock;
  2155. barrier();
  2156. userpg->index = perf_event_index(event);
  2157. userpg->offset = perf_event_count(event);
  2158. if (event->state == PERF_EVENT_STATE_ACTIVE)
  2159. userpg->offset -= local64_read(&event->hw.prev_count);
  2160. userpg->time_enabled = event->total_time_enabled +
  2161. atomic64_read(&event->child_total_time_enabled);
  2162. userpg->time_running = event->total_time_running +
  2163. atomic64_read(&event->child_total_time_running);
  2164. barrier();
  2165. ++userpg->lock;
  2166. preempt_enable();
  2167. unlock:
  2168. rcu_read_unlock();
  2169. }
  2170. static unsigned long perf_data_size(struct perf_buffer *buffer);
  2171. static void
  2172. perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
  2173. {
  2174. long max_size = perf_data_size(buffer);
  2175. if (watermark)
  2176. buffer->watermark = min(max_size, watermark);
  2177. if (!buffer->watermark)
  2178. buffer->watermark = max_size / 2;
  2179. if (flags & PERF_BUFFER_WRITABLE)
  2180. buffer->writable = 1;
  2181. atomic_set(&buffer->refcount, 1);
  2182. }
  2183. #ifndef CONFIG_PERF_USE_VMALLOC
  2184. /*
  2185. * Back perf_mmap() with regular GFP_KERNEL-0 pages.
  2186. */
  2187. static struct page *
  2188. perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
  2189. {
  2190. if (pgoff > buffer->nr_pages)
  2191. return NULL;
  2192. if (pgoff == 0)
  2193. return virt_to_page(buffer->user_page);
  2194. return virt_to_page(buffer->data_pages[pgoff - 1]);
  2195. }
  2196. static void *perf_mmap_alloc_page(int cpu)
  2197. {
  2198. struct page *page;
  2199. int node;
  2200. node = (cpu == -1) ? cpu : cpu_to_node(cpu);
  2201. page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
  2202. if (!page)
  2203. return NULL;
  2204. return page_address(page);
  2205. }
  2206. static struct perf_buffer *
  2207. perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
  2208. {
  2209. struct perf_buffer *buffer;
  2210. unsigned long size;
  2211. int i;
  2212. size = sizeof(struct perf_buffer);
  2213. size += nr_pages * sizeof(void *);
  2214. buffer = kzalloc(size, GFP_KERNEL);
  2215. if (!buffer)
  2216. goto fail;
  2217. buffer->user_page = perf_mmap_alloc_page(cpu);
  2218. if (!buffer->user_page)
  2219. goto fail_user_page;
  2220. for (i = 0; i < nr_pages; i++) {
  2221. buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
  2222. if (!buffer->data_pages[i])
  2223. goto fail_data_pages;
  2224. }
  2225. buffer->nr_pages = nr_pages;
  2226. perf_buffer_init(buffer, watermark, flags);
  2227. return buffer;
  2228. fail_data_pages:
  2229. for (i--; i >= 0; i--)
  2230. free_page((unsigned long)buffer->data_pages[i]);
  2231. free_page((unsigned long)buffer->user_page);
  2232. fail_user_page:
  2233. kfree(buffer);
  2234. fail:
  2235. return NULL;
  2236. }
  2237. static void perf_mmap_free_page(unsigned long addr)
  2238. {
  2239. struct page *page = virt_to_page((void *)addr);
  2240. page->mapping = NULL;
  2241. __free_page(page);
  2242. }
  2243. static void perf_buffer_free(struct perf_buffer *buffer)
  2244. {
  2245. int i;
  2246. perf_mmap_free_page((unsigned long)buffer->user_page);
  2247. for (i = 0; i < buffer->nr_pages; i++)
  2248. perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
  2249. kfree(buffer);
  2250. }
  2251. static inline int page_order(struct perf_buffer *buffer)
  2252. {
  2253. return 0;
  2254. }
  2255. #else
  2256. /*
  2257. * Back perf_mmap() with vmalloc memory.
  2258. *
  2259. * Required for architectures that have d-cache aliasing issues.
  2260. */
  2261. static inline int page_order(struct perf_buffer *buffer)
  2262. {
  2263. return buffer->page_order;
  2264. }
  2265. static struct page *
  2266. perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
  2267. {
  2268. if (pgoff > (1UL << page_order(buffer)))
  2269. return NULL;
  2270. return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
  2271. }
  2272. static void perf_mmap_unmark_page(void *addr)
  2273. {
  2274. struct page *page = vmalloc_to_page(addr);
  2275. page->mapping = NULL;
  2276. }
  2277. static void perf_buffer_free_work(struct work_struct *work)
  2278. {
  2279. struct perf_buffer *buffer;
  2280. void *base;
  2281. int i, nr;
  2282. buffer = container_of(work, struct perf_buffer, work);
  2283. nr = 1 << page_order(buffer);
  2284. base = buffer->user_page;
  2285. for (i = 0; i < nr + 1; i++)
  2286. perf_mmap_unmark_page(base + (i * PAGE_SIZE));
  2287. vfree(base);
  2288. kfree(buffer);
  2289. }
  2290. static void perf_buffer_free(struct perf_buffer *buffer)
  2291. {
  2292. schedule_work(&buffer->work);
  2293. }
  2294. static struct perf_buffer *
  2295. perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
  2296. {
  2297. struct perf_buffer *buffer;
  2298. unsigned long size;
  2299. void *all_buf;
  2300. size = sizeof(struct perf_buffer);
  2301. size += sizeof(void *);
  2302. buffer = kzalloc(size, GFP_KERNEL);
  2303. if (!buffer)
  2304. goto fail;
  2305. INIT_WORK(&buffer->work, perf_buffer_free_work);
  2306. all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
  2307. if (!all_buf)
  2308. goto fail_all_buf;
  2309. buffer->user_page = all_buf;
  2310. buffer->data_pages[0] = all_buf + PAGE_SIZE;
  2311. buffer->page_order = ilog2(nr_pages);
  2312. buffer->nr_pages = 1;
  2313. perf_buffer_init(buffer, watermark, flags);
  2314. return buffer;
  2315. fail_all_buf:
  2316. kfree(buffer);
  2317. fail:
  2318. return NULL;
  2319. }
  2320. #endif
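
/*
 * Size of the data area in bytes: nr_pages "pages", each of
 * PAGE_SIZE << page_order(buffer) bytes.
 */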
  2321. static unsigned long perf_data_size(struct perf_buffer *buffer)
  2322. {
  2323. return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
  2324. }
  2325. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  2326. {
  2327. struct perf_event *event = vma->vm_file->private_data;
  2328. struct perf_buffer *buffer;
  2329. int ret = VM_FAULT_SIGBUS;
  2330. if (vmf->flags & FAULT_FLAG_MKWRITE) {
  2331. if (vmf->pgoff == 0)
  2332. ret = 0;
  2333. return ret;
  2334. }
  2335. rcu_read_lock();
  2336. buffer = rcu_dereference(event->buffer);
  2337. if (!buffer)
  2338. goto unlock;
  2339. if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
  2340. goto unlock;
  2341. vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
  2342. if (!vmf->page)
  2343. goto unlock;
  2344. get_page(vmf->page);
  2345. vmf->page->mapping = vma->vm_file->f_mapping;
  2346. vmf->page->index = vmf->pgoff;
  2347. ret = 0;
  2348. unlock:
  2349. rcu_read_unlock();
  2350. return ret;
  2351. }
  2352. static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
  2353. {
  2354. struct perf_buffer *buffer;
  2355. buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
  2356. perf_buffer_free(buffer);
  2357. }
  2358. static struct perf_buffer *perf_buffer_get(struct perf_event *event)
  2359. {
  2360. struct perf_buffer *buffer;
  2361. rcu_read_lock();
  2362. buffer = rcu_dereference(event->buffer);
  2363. if (buffer) {
  2364. if (!atomic_inc_not_zero(&buffer->refcount))
  2365. buffer = NULL;
  2366. }
  2367. rcu_read_unlock();
  2368. return buffer;
  2369. }
  2370. static void perf_buffer_put(struct perf_buffer *buffer)
  2371. {
  2372. if (!atomic_dec_and_test(&buffer->refcount))
  2373. return;
  2374. call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
  2375. }
  2376. static void perf_mmap_open(struct vm_area_struct *vma)
  2377. {
  2378. struct perf_event *event = vma->vm_file->private_data;
  2379. atomic_inc(&event->mmap_count);
  2380. }
  2381. static void perf_mmap_close(struct vm_area_struct *vma)
  2382. {
  2383. struct perf_event *event = vma->vm_file->private_data;
  2384. if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
  2385. unsigned long size = perf_data_size(event->buffer);
  2386. struct user_struct *user = event->mmap_user;
  2387. struct perf_buffer *buffer = event->buffer;
  2388. atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
  2389. vma->vm_mm->locked_vm -= event->mmap_locked;
  2390. rcu_assign_pointer(event->buffer, NULL);
  2391. mutex_unlock(&event->mmap_mutex);
  2392. perf_buffer_put(buffer);
  2393. free_uid(user);
  2394. }
  2395. }
  2396. static const struct vm_operations_struct perf_mmap_vmops = {
  2397. .open = perf_mmap_open,
  2398. .close = perf_mmap_close,
  2399. .fault = perf_mmap_fault,
  2400. .page_mkwrite = perf_mmap_fault,
  2401. };
  2402. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  2403. {
  2404. struct perf_event *event = file->private_data;
  2405. unsigned long user_locked, user_lock_limit;
  2406. struct user_struct *user = current_user();
  2407. unsigned long locked, lock_limit;
  2408. struct perf_buffer *buffer;
  2409. unsigned long vma_size;
  2410. unsigned long nr_pages;
  2411. long user_extra, extra;
  2412. int ret = 0, flags = 0;
  2413. /*
  2414. * Don't allow mmap() of inherited per-task counters. This would
  2415. * create a performance issue due to all children writing to the
  2416. * same buffer.
  2417. */
  2418. if (event->cpu == -1 && event->attr.inherit)
  2419. return -EINVAL;
  2420. if (!(vma->vm_flags & VM_SHARED))
  2421. return -EINVAL;
  2422. vma_size = vma->vm_end - vma->vm_start;
  2423. nr_pages = (vma_size / PAGE_SIZE) - 1;
  2424. /*
  2425. * If we have buffer pages ensure they're a power-of-two number, so we
  2426. * can do bitmasks instead of modulo.
  2427. */
  2428. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  2429. return -EINVAL;
  2430. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  2431. return -EINVAL;
  2432. if (vma->vm_pgoff != 0)
  2433. return -EINVAL;
  2434. WARN_ON_ONCE(event->ctx->parent_ctx);
  2435. mutex_lock(&event->mmap_mutex);
  2436. if (event->buffer) {
  2437. if (event->buffer->nr_pages == nr_pages)
  2438. atomic_inc(&event->buffer->refcount);
  2439. else
  2440. ret = -EINVAL;
  2441. goto unlock;
  2442. }
  2443. user_extra = nr_pages + 1;
  2444. user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
  2445. /*
  2446. * Increase the limit linearly with more CPUs:
  2447. */
  2448. user_lock_limit *= num_online_cpus();
  2449. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  2450. extra = 0;
  2451. if (user_locked > user_lock_limit)
  2452. extra = user_locked - user_lock_limit;
  2453. lock_limit = rlimit(RLIMIT_MEMLOCK);
  2454. lock_limit >>= PAGE_SHIFT;
  2455. locked = vma->vm_mm->locked_vm + extra;
  2456. if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
  2457. !capable(CAP_IPC_LOCK)) {
  2458. ret = -EPERM;
  2459. goto unlock;
  2460. }
  2461. WARN_ON(event->buffer);
  2462. if (vma->vm_flags & VM_WRITE)
  2463. flags |= PERF_BUFFER_WRITABLE;
  2464. buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
  2465. event->cpu, flags);
  2466. if (!buffer) {
  2467. ret = -ENOMEM;
  2468. goto unlock;
  2469. }
  2470. rcu_assign_pointer(event->buffer, buffer);
  2471. atomic_long_add(user_extra, &user->locked_vm);
  2472. event->mmap_locked = extra;
  2473. event->mmap_user = get_current_user();
  2474. vma->vm_mm->locked_vm += event->mmap_locked;
  2475. unlock:
  2476. if (!ret)
  2477. atomic_inc(&event->mmap_count);
  2478. mutex_unlock(&event->mmap_mutex);
  2479. vma->vm_flags |= VM_RESERVED;
  2480. vma->vm_ops = &perf_mmap_vmops;
  2481. return ret;
  2482. }
  2483. static int perf_fasync(int fd, struct file *filp, int on)
  2484. {
  2485. struct inode *inode = filp->f_path.dentry->d_inode;
  2486. struct perf_event *event = filp->private_data;
  2487. int retval;
  2488. mutex_lock(&inode->i_mutex);
  2489. retval = fasync_helper(fd, filp, on, &event->fasync);
  2490. mutex_unlock(&inode->i_mutex);
  2491. if (retval < 0)
  2492. return retval;
  2493. return 0;
  2494. }
  2495. static const struct file_operations perf_fops = {
  2496. .llseek = no_llseek,
  2497. .release = perf_release,
  2498. .read = perf_read,
  2499. .poll = perf_poll,
  2500. .unlocked_ioctl = perf_ioctl,
  2501. .compat_ioctl = perf_ioctl,
  2502. .mmap = perf_mmap,
  2503. .fasync = perf_fasync,
  2504. };
  2505. /*
  2506. * Perf event wakeup
  2507. *
  2508. * If there's data, ensure we set the poll() state and publish everything
  2509. * to user-space before waking everybody up.
  2510. */
  2511. void perf_event_wakeup(struct perf_event *event)
  2512. {
  2513. wake_up_all(&event->waitq);
  2514. if (event->pending_kill) {
  2515. kill_fasync(&event->fasync, SIGIO, event->pending_kill);
  2516. event->pending_kill = 0;
  2517. }
  2518. }
/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */
  2527. static void perf_pending_event(struct perf_pending_entry *entry)
  2528. {
  2529. struct perf_event *event = container_of(entry,
  2530. struct perf_event, pending);
  2531. if (event->pending_disable) {
  2532. event->pending_disable = 0;
  2533. __perf_event_disable(event);
  2534. }
  2535. if (event->pending_wakeup) {
  2536. event->pending_wakeup = 0;
  2537. perf_event_wakeup(event);
  2538. }
  2539. }
  2540. #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
  2541. static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
  2542. PENDING_TAIL,
  2543. };
  2544. static void perf_pending_queue(struct perf_pending_entry *entry,
  2545. void (*func)(struct perf_pending_entry *))
  2546. {
  2547. struct perf_pending_entry **head;
  2548. if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
  2549. return;
  2550. entry->func = func;
  2551. head = &get_cpu_var(perf_pending_head);
  2552. do {
  2553. entry->next = *head;
  2554. } while (cmpxchg(head, entry->next, entry) != entry->next);
  2555. set_perf_event_pending();
  2556. put_cpu_var(perf_pending_head);
  2557. }
  2558. static int __perf_pending_run(void)
  2559. {
  2560. struct perf_pending_entry *list;
  2561. int nr = 0;
  2562. list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
  2563. while (list != PENDING_TAIL) {
  2564. void (*func)(struct perf_pending_entry *);
  2565. struct perf_pending_entry *entry = list;
  2566. list = list->next;
  2567. func = entry->func;
  2568. entry->next = NULL;
  2569. /*
  2570. * Ensure we observe the unqueue before we issue the wakeup,
  2571. * so that we won't be waiting forever.
  2572. * -- see perf_not_pending().
  2573. */
  2574. smp_wmb();
  2575. func(entry);
  2576. nr++;
  2577. }
  2578. return nr;
  2579. }
  2580. static inline int perf_not_pending(struct perf_event *event)
  2581. {
  2582. /*
2583. * If we flush on whatever CPU we run on, there is a chance we don't
  2584. * need to wait.
  2585. */
  2586. get_cpu();
  2587. __perf_pending_run();
  2588. put_cpu();
  2589. /*
  2590. * Ensure we see the proper queue state before going to sleep
2591. * so that we do not miss the wakeup. -- see __perf_pending_run()
  2592. */
  2593. smp_rmb();
  2594. return event->pending.next == NULL;
  2595. }
  2596. static void perf_pending_sync(struct perf_event *event)
  2597. {
  2598. wait_event(event->waitq, perf_not_pending(event));
  2599. }
  2600. void perf_event_do_pending(void)
  2601. {
  2602. __perf_pending_run();
  2603. }
  2604. /*
  2605. * We assume there is only KVM supporting the callbacks.
  2606. * Later on, we might change it to a list if there is
  2607. * another virtualization implementation supporting the callbacks.
  2608. */
  2609. struct perf_guest_info_callbacks *perf_guest_cbs;
  2610. int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  2611. {
  2612. perf_guest_cbs = cbs;
  2613. return 0;
  2614. }
  2615. EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
  2616. int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  2617. {
  2618. perf_guest_cbs = NULL;
  2619. return 0;
  2620. }
  2621. EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
  2622. /*
  2623. * Output
  2624. */
  2625. static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
  2626. unsigned long offset, unsigned long head)
  2627. {
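/*
 * For a writable mapping we must not overwrite data that user-space has not
 * consumed yet: check that moving the write position from @offset to @head
 * does not pass @tail, with all distances taken modulo the buffer size.
 */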
  2628. unsigned long mask;
  2629. if (!buffer->writable)
  2630. return true;
  2631. mask = perf_data_size(buffer) - 1;
  2632. offset = (offset - tail) & mask;
  2633. head = (head - tail) & mask;
  2634. if ((int)(head - offset) < 0)
  2635. return false;
  2636. return true;
  2637. }
  2638. static void perf_output_wakeup(struct perf_output_handle *handle)
  2639. {
  2640. atomic_set(&handle->buffer->poll, POLL_IN);
  2641. if (handle->nmi) {
  2642. handle->event->pending_wakeup = 1;
  2643. perf_pending_queue(&handle->event->pending,
  2644. perf_pending_event);
  2645. } else
  2646. perf_event_wakeup(handle->event);
  2647. }
  2648. /*
  2649. * We need to ensure a later event_id doesn't publish a head when a former
  2650. * event isn't done writing. However since we need to deal with NMIs we
2651. * event isn't done writing. However, since we need to deal with NMIs we
  2652. *
  2653. * We only publish the head (and generate a wakeup) when the outer-most
  2654. * event completes.
  2655. */
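/*
 * Example of the nesting this handles: an NMI lands between another
 * writer's perf_output_get_handle() and perf_output_put_handle().  The NMI
 * writer sees buffer->nest elevated and skips publishing data_head; the
 * interrupted, outer-most writer publishes it instead, re-checking
 * buffer->head so the NMI's data is not lost.
 */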
  2656. static void perf_output_get_handle(struct perf_output_handle *handle)
  2657. {
  2658. struct perf_buffer *buffer = handle->buffer;
  2659. preempt_disable();
  2660. local_inc(&buffer->nest);
  2661. handle->wakeup = local_read(&buffer->wakeup);
  2662. }
  2663. static void perf_output_put_handle(struct perf_output_handle *handle)
  2664. {
  2665. struct perf_buffer *buffer = handle->buffer;
  2666. unsigned long head;
  2667. again:
  2668. head = local_read(&buffer->head);
  2669. /*
  2670. * IRQ/NMI can happen here, which means we can miss a head update.
  2671. */
  2672. if (!local_dec_and_test(&buffer->nest))
  2673. goto out;
  2674. /*
2675. * Publish the known good head. Rely on the full barrier implied
2676. * by atomic_dec_and_test() to order the buffer->head read and this
2677. * write.
  2678. */
  2679. buffer->user_page->data_head = head;
  2680. /*
2681. * Now check if we missed an update; rely on the (compiler)
2682. * barrier in atomic_dec_and_test() to re-read buffer->head.
  2683. */
  2684. if (unlikely(head != local_read(&buffer->head))) {
  2685. local_inc(&buffer->nest);
  2686. goto again;
  2687. }
  2688. if (handle->wakeup != local_read(&buffer->wakeup))
  2689. perf_output_wakeup(handle);
  2690. out:
  2691. preempt_enable();
  2692. }
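/*
 * Copy @len bytes into the space reserved in @handle, stepping on to the
 * next data page whenever the current one fills up; handle->size tracks the
 * room left in the current page.
 */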
  2693. __always_inline void perf_output_copy(struct perf_output_handle *handle,
  2694. const void *buf, unsigned int len)
  2695. {
  2696. do {
  2697. unsigned long size = min_t(unsigned long, handle->size, len);
  2698. memcpy(handle->addr, buf, size);
  2699. len -= size;
  2700. handle->addr += size;
  2701. buf += size;
  2702. handle->size -= size;
  2703. if (!handle->size) {
  2704. struct perf_buffer *buffer = handle->buffer;
  2705. handle->page++;
  2706. handle->page &= buffer->nr_pages - 1;
  2707. handle->addr = buffer->data_pages[handle->page];
  2708. handle->size = PAGE_SIZE << page_order(buffer);
  2709. }
  2710. } while (len);
  2711. }
  2712. int perf_output_begin(struct perf_output_handle *handle,
  2713. struct perf_event *event, unsigned int size,
  2714. int nmi, int sample)
  2715. {
  2716. struct perf_buffer *buffer;
  2717. unsigned long tail, offset, head;
  2718. int have_lost;
  2719. struct {
  2720. struct perf_event_header header;
  2721. u64 id;
  2722. u64 lost;
  2723. } lost_event;
  2724. rcu_read_lock();
  2725. /*
  2726. * For inherited events we send all the output towards the parent.
  2727. */
  2728. if (event->parent)
  2729. event = event->parent;
  2730. buffer = rcu_dereference(event->buffer);
  2731. if (!buffer)
  2732. goto out;
  2733. handle->buffer = buffer;
  2734. handle->event = event;
  2735. handle->nmi = nmi;
  2736. handle->sample = sample;
  2737. if (!buffer->nr_pages)
  2738. goto out;
  2739. have_lost = local_read(&buffer->lost);
  2740. if (have_lost)
  2741. size += sizeof(lost_event);
  2742. perf_output_get_handle(handle);
  2743. do {
  2744. /*
2745. * Userspace could choose to issue a mb() before updating the
2746. * tail pointer, so that all reads will be completed before the
2747. * write is issued.
  2748. */
  2749. tail = ACCESS_ONCE(buffer->user_page->data_tail);
  2750. smp_rmb();
  2751. offset = head = local_read(&buffer->head);
  2752. head += size;
  2753. if (unlikely(!perf_output_space(buffer, tail, offset, head)))
  2754. goto fail;
  2755. } while (local_cmpxchg(&buffer->head, offset, head) != offset);
  2756. if (head - local_read(&buffer->wakeup) > buffer->watermark)
  2757. local_add(buffer->watermark, &buffer->wakeup);
  2758. handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
  2759. handle->page &= buffer->nr_pages - 1;
  2760. handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
  2761. handle->addr = buffer->data_pages[handle->page];
  2762. handle->addr += handle->size;
  2763. handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
  2764. if (have_lost) {
  2765. lost_event.header.type = PERF_RECORD_LOST;
  2766. lost_event.header.misc = 0;
  2767. lost_event.header.size = sizeof(lost_event);
  2768. lost_event.id = event->id;
  2769. lost_event.lost = local_xchg(&buffer->lost, 0);
  2770. perf_output_put(handle, lost_event);
  2771. }
  2772. return 0;
  2773. fail:
  2774. local_inc(&buffer->lost);
  2775. perf_output_put_handle(handle);
  2776. out:
  2777. rcu_read_unlock();
  2778. return -ENOSPC;
  2779. }
  2780. void perf_output_end(struct perf_output_handle *handle)
  2781. {
  2782. struct perf_event *event = handle->event;
  2783. struct perf_buffer *buffer = handle->buffer;
  2784. int wakeup_events = event->attr.wakeup_events;
  2785. if (handle->sample && wakeup_events) {
  2786. int events = local_inc_return(&buffer->events);
  2787. if (events >= wakeup_events) {
  2788. local_sub(wakeup_events, &buffer->events);
  2789. local_inc(&buffer->wakeup);
  2790. }
  2791. }
  2792. perf_output_put_handle(handle);
  2793. rcu_read_unlock();
  2794. }
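/*
 * Minimal sketch of emitting a record, following the same pattern used by
 * perf_event_read_event() and perf_log_throttle() below ('rec' stands for
 * whatever fixed-size record is being written):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, sizeof(rec), nmi, 0))
 *		return;
 *	perf_output_put(&handle, rec);
 *	perf_output_end(&handle);
 *
 * perf_output_begin() takes the RCU read lock; perf_output_end(), or the
 * failure path inside perf_output_begin(), drops it again.
 */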
  2795. static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
  2796. {
  2797. /*
  2798. * only top level events have the pid namespace they were created in
  2799. */
  2800. if (event->parent)
  2801. event = event->parent;
  2802. return task_tgid_nr_ns(p, event->ns);
  2803. }
  2804. static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
  2805. {
  2806. /*
  2807. * only top level events have the pid namespace they were created in
  2808. */
  2809. if (event->parent)
  2810. event = event->parent;
  2811. return task_pid_nr_ns(p, event->ns);
  2812. }
  2813. static void perf_output_read_one(struct perf_output_handle *handle,
  2814. struct perf_event *event)
  2815. {
  2816. u64 read_format = event->attr.read_format;
  2817. u64 values[4];
  2818. int n = 0;
  2819. values[n++] = perf_event_count(event);
  2820. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
  2821. values[n++] = event->total_time_enabled +
  2822. atomic64_read(&event->child_total_time_enabled);
  2823. }
  2824. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
  2825. values[n++] = event->total_time_running +
  2826. atomic64_read(&event->child_total_time_running);
  2827. }
  2828. if (read_format & PERF_FORMAT_ID)
  2829. values[n++] = primary_event_id(event);
  2830. perf_output_copy(handle, values, n * sizeof(u64));
  2831. }
  2832. /*
  2833. * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  2834. */
  2835. static void perf_output_read_group(struct perf_output_handle *handle,
  2836. struct perf_event *event)
  2837. {
  2838. struct perf_event *leader = event->group_leader, *sub;
  2839. u64 read_format = event->attr.read_format;
  2840. u64 values[5];
  2841. int n = 0;
  2842. values[n++] = 1 + leader->nr_siblings;
  2843. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  2844. values[n++] = leader->total_time_enabled;
  2845. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  2846. values[n++] = leader->total_time_running;
  2847. if (leader != event)
  2848. leader->pmu->read(leader);
  2849. values[n++] = perf_event_count(leader);
  2850. if (read_format & PERF_FORMAT_ID)
  2851. values[n++] = primary_event_id(leader);
  2852. perf_output_copy(handle, values, n * sizeof(u64));
  2853. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  2854. n = 0;
  2855. if (sub != event)
  2856. sub->pmu->read(sub);
  2857. values[n++] = perf_event_count(sub);
  2858. if (read_format & PERF_FORMAT_ID)
  2859. values[n++] = primary_event_id(sub);
  2860. perf_output_copy(handle, values, n * sizeof(u64));
  2861. }
  2862. }
  2863. static void perf_output_read(struct perf_output_handle *handle,
  2864. struct perf_event *event)
  2865. {
  2866. if (event->attr.read_format & PERF_FORMAT_GROUP)
  2867. perf_output_read_group(handle, event);
  2868. else
  2869. perf_output_read_one(handle, event);
  2870. }
  2871. void perf_output_sample(struct perf_output_handle *handle,
  2872. struct perf_event_header *header,
  2873. struct perf_sample_data *data,
  2874. struct perf_event *event)
  2875. {
  2876. u64 sample_type = data->type;
  2877. perf_output_put(handle, *header);
  2878. if (sample_type & PERF_SAMPLE_IP)
  2879. perf_output_put(handle, data->ip);
  2880. if (sample_type & PERF_SAMPLE_TID)
  2881. perf_output_put(handle, data->tid_entry);
  2882. if (sample_type & PERF_SAMPLE_TIME)
  2883. perf_output_put(handle, data->time);
  2884. if (sample_type & PERF_SAMPLE_ADDR)
  2885. perf_output_put(handle, data->addr);
  2886. if (sample_type & PERF_SAMPLE_ID)
  2887. perf_output_put(handle, data->id);
  2888. if (sample_type & PERF_SAMPLE_STREAM_ID)
  2889. perf_output_put(handle, data->stream_id);
  2890. if (sample_type & PERF_SAMPLE_CPU)
  2891. perf_output_put(handle, data->cpu_entry);
  2892. if (sample_type & PERF_SAMPLE_PERIOD)
  2893. perf_output_put(handle, data->period);
  2894. if (sample_type & PERF_SAMPLE_READ)
  2895. perf_output_read(handle, event);
  2896. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2897. if (data->callchain) {
  2898. int size = 1;
  2899. if (data->callchain)
  2900. size += data->callchain->nr;
  2901. size *= sizeof(u64);
  2902. perf_output_copy(handle, data->callchain, size);
  2903. } else {
  2904. u64 nr = 0;
  2905. perf_output_put(handle, nr);
  2906. }
  2907. }
  2908. if (sample_type & PERF_SAMPLE_RAW) {
  2909. if (data->raw) {
  2910. perf_output_put(handle, data->raw->size);
  2911. perf_output_copy(handle, data->raw->data,
  2912. data->raw->size);
  2913. } else {
  2914. struct {
  2915. u32 size;
  2916. u32 data;
  2917. } raw = {
  2918. .size = sizeof(u32),
  2919. .data = 0,
  2920. };
  2921. perf_output_put(handle, raw);
  2922. }
  2923. }
  2924. }
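/*
 * Compute the layout of an upcoming sample: header->size must account for
 * exactly the fields perf_output_sample() will emit for this sample_type,
 * so the two functions have to be kept in sync.
 */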
  2925. void perf_prepare_sample(struct perf_event_header *header,
  2926. struct perf_sample_data *data,
  2927. struct perf_event *event,
  2928. struct pt_regs *regs)
  2929. {
  2930. u64 sample_type = event->attr.sample_type;
  2931. data->type = sample_type;
  2932. header->type = PERF_RECORD_SAMPLE;
  2933. header->size = sizeof(*header);
  2934. header->misc = 0;
  2935. header->misc |= perf_misc_flags(regs);
  2936. if (sample_type & PERF_SAMPLE_IP) {
  2937. data->ip = perf_instruction_pointer(regs);
  2938. header->size += sizeof(data->ip);
  2939. }
  2940. if (sample_type & PERF_SAMPLE_TID) {
  2941. /* namespace issues */
  2942. data->tid_entry.pid = perf_event_pid(event, current);
  2943. data->tid_entry.tid = perf_event_tid(event, current);
  2944. header->size += sizeof(data->tid_entry);
  2945. }
  2946. if (sample_type & PERF_SAMPLE_TIME) {
  2947. data->time = perf_clock();
  2948. header->size += sizeof(data->time);
  2949. }
  2950. if (sample_type & PERF_SAMPLE_ADDR)
  2951. header->size += sizeof(data->addr);
  2952. if (sample_type & PERF_SAMPLE_ID) {
  2953. data->id = primary_event_id(event);
  2954. header->size += sizeof(data->id);
  2955. }
  2956. if (sample_type & PERF_SAMPLE_STREAM_ID) {
  2957. data->stream_id = event->id;
  2958. header->size += sizeof(data->stream_id);
  2959. }
  2960. if (sample_type & PERF_SAMPLE_CPU) {
  2961. data->cpu_entry.cpu = raw_smp_processor_id();
  2962. data->cpu_entry.reserved = 0;
  2963. header->size += sizeof(data->cpu_entry);
  2964. }
  2965. if (sample_type & PERF_SAMPLE_PERIOD)
  2966. header->size += sizeof(data->period);
  2967. if (sample_type & PERF_SAMPLE_READ)
  2968. header->size += perf_event_read_size(event);
  2969. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2970. int size = 1;
  2971. data->callchain = perf_callchain(regs);
  2972. if (data->callchain)
  2973. size += data->callchain->nr;
  2974. header->size += size * sizeof(u64);
  2975. }
  2976. if (sample_type & PERF_SAMPLE_RAW) {
  2977. int size = sizeof(u32);
  2978. if (data->raw)
  2979. size += data->raw->size;
  2980. else
  2981. size += sizeof(u32);
  2982. WARN_ON_ONCE(size & (sizeof(u64)-1));
  2983. header->size += size;
  2984. }
  2985. }
  2986. static void perf_event_output(struct perf_event *event, int nmi,
  2987. struct perf_sample_data *data,
  2988. struct pt_regs *regs)
  2989. {
  2990. struct perf_output_handle handle;
  2991. struct perf_event_header header;
  2992. /* protect the callchain buffers */
  2993. rcu_read_lock();
  2994. perf_prepare_sample(&header, data, event, regs);
  2995. if (perf_output_begin(&handle, event, header.size, nmi, 1))
  2996. goto exit;
  2997. perf_output_sample(&handle, &header, data, event);
  2998. perf_output_end(&handle);
  2999. exit:
  3000. rcu_read_unlock();
  3001. }
  3002. /*
  3003. * read event_id
  3004. */
  3005. struct perf_read_event {
  3006. struct perf_event_header header;
  3007. u32 pid;
  3008. u32 tid;
  3009. };
  3010. static void
  3011. perf_event_read_event(struct perf_event *event,
  3012. struct task_struct *task)
  3013. {
  3014. struct perf_output_handle handle;
  3015. struct perf_read_event read_event = {
  3016. .header = {
  3017. .type = PERF_RECORD_READ,
  3018. .misc = 0,
  3019. .size = sizeof(read_event) + perf_event_read_size(event),
  3020. },
  3021. .pid = perf_event_pid(event, task),
  3022. .tid = perf_event_tid(event, task),
  3023. };
  3024. int ret;
  3025. ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
  3026. if (ret)
  3027. return;
  3028. perf_output_put(&handle, read_event);
  3029. perf_output_read(&handle, event);
  3030. perf_output_end(&handle);
  3031. }
  3032. /*
  3033. * task tracking -- fork/exit
  3034. *
  3035. * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
  3036. */
  3037. struct perf_task_event {
  3038. struct task_struct *task;
  3039. struct perf_event_context *task_ctx;
  3040. struct {
  3041. struct perf_event_header header;
  3042. u32 pid;
  3043. u32 ppid;
  3044. u32 tid;
  3045. u32 ptid;
  3046. u64 time;
  3047. } event_id;
  3048. };
  3049. static void perf_event_task_output(struct perf_event *event,
  3050. struct perf_task_event *task_event)
  3051. {
  3052. struct perf_output_handle handle;
  3053. struct task_struct *task = task_event->task;
  3054. int size, ret;
  3055. size = task_event->event_id.header.size;
  3056. ret = perf_output_begin(&handle, event, size, 0, 0);
  3057. if (ret)
  3058. return;
  3059. task_event->event_id.pid = perf_event_pid(event, task);
  3060. task_event->event_id.ppid = perf_event_pid(event, current);
  3061. task_event->event_id.tid = perf_event_tid(event, task);
  3062. task_event->event_id.ptid = perf_event_tid(event, current);
  3063. perf_output_put(&handle, task_event->event_id);
  3064. perf_output_end(&handle);
  3065. }
  3066. static int perf_event_task_match(struct perf_event *event)
  3067. {
  3068. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3069. return 0;
  3070. if (event->cpu != -1 && event->cpu != smp_processor_id())
  3071. return 0;
  3072. if (event->attr.comm || event->attr.mmap ||
  3073. event->attr.mmap_data || event->attr.task)
  3074. return 1;
  3075. return 0;
  3076. }
  3077. static void perf_event_task_ctx(struct perf_event_context *ctx,
  3078. struct perf_task_event *task_event)
  3079. {
  3080. struct perf_event *event;
  3081. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3082. if (perf_event_task_match(event))
  3083. perf_event_task_output(event, task_event);
  3084. }
  3085. }
  3086. static void perf_event_task_event(struct perf_task_event *task_event)
  3087. {
  3088. struct perf_cpu_context *cpuctx;
  3089. struct perf_event_context *ctx;
  3090. struct pmu *pmu;
  3091. int ctxn;
  3092. rcu_read_lock();
  3093. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3094. cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  3095. perf_event_task_ctx(&cpuctx->ctx, task_event);
  3096. ctx = task_event->task_ctx;
  3097. if (!ctx) {
  3098. ctxn = pmu->task_ctx_nr;
  3099. if (ctxn < 0)
  3100. continue;
  3101. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3102. }
  3103. if (ctx)
  3104. perf_event_task_ctx(ctx, task_event);
  3105. }
  3106. rcu_read_unlock();
  3107. }
  3108. static void perf_event_task(struct task_struct *task,
  3109. struct perf_event_context *task_ctx,
  3110. int new)
  3111. {
  3112. struct perf_task_event task_event;
  3113. if (!atomic_read(&nr_comm_events) &&
  3114. !atomic_read(&nr_mmap_events) &&
  3115. !atomic_read(&nr_task_events))
  3116. return;
  3117. task_event = (struct perf_task_event){
  3118. .task = task,
  3119. .task_ctx = task_ctx,
  3120. .event_id = {
  3121. .header = {
  3122. .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
  3123. .misc = 0,
  3124. .size = sizeof(task_event.event_id),
  3125. },
  3126. /* .pid */
  3127. /* .ppid */
  3128. /* .tid */
  3129. /* .ptid */
  3130. .time = perf_clock(),
  3131. },
  3132. };
  3133. perf_event_task_event(&task_event);
  3134. }
  3135. void perf_event_fork(struct task_struct *task)
  3136. {
  3137. perf_event_task(task, NULL, 1);
  3138. }
  3139. /*
  3140. * comm tracking
  3141. */
  3142. struct perf_comm_event {
  3143. struct task_struct *task;
  3144. char *comm;
  3145. int comm_size;
  3146. struct {
  3147. struct perf_event_header header;
  3148. u32 pid;
  3149. u32 tid;
  3150. } event_id;
  3151. };
  3152. static void perf_event_comm_output(struct perf_event *event,
  3153. struct perf_comm_event *comm_event)
  3154. {
  3155. struct perf_output_handle handle;
  3156. int size = comm_event->event_id.header.size;
  3157. int ret = perf_output_begin(&handle, event, size, 0, 0);
  3158. if (ret)
  3159. return;
  3160. comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
  3161. comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
  3162. perf_output_put(&handle, comm_event->event_id);
  3163. perf_output_copy(&handle, comm_event->comm,
  3164. comm_event->comm_size);
  3165. perf_output_end(&handle);
  3166. }
  3167. static int perf_event_comm_match(struct perf_event *event)
  3168. {
  3169. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3170. return 0;
  3171. if (event->cpu != -1 && event->cpu != smp_processor_id())
  3172. return 0;
  3173. if (event->attr.comm)
  3174. return 1;
  3175. return 0;
  3176. }
  3177. static void perf_event_comm_ctx(struct perf_event_context *ctx,
  3178. struct perf_comm_event *comm_event)
  3179. {
  3180. struct perf_event *event;
  3181. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3182. if (perf_event_comm_match(event))
  3183. perf_event_comm_output(event, comm_event);
  3184. }
  3185. }
  3186. static void perf_event_comm_event(struct perf_comm_event *comm_event)
  3187. {
  3188. struct perf_cpu_context *cpuctx;
  3189. struct perf_event_context *ctx;
  3190. char comm[TASK_COMM_LEN];
  3191. unsigned int size;
  3192. struct pmu *pmu;
  3193. int ctxn;
  3194. memset(comm, 0, sizeof(comm));
  3195. strlcpy(comm, comm_event->task->comm, sizeof(comm));
  3196. size = ALIGN(strlen(comm)+1, sizeof(u64));
  3197. comm_event->comm = comm;
  3198. comm_event->comm_size = size;
  3199. comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  3200. rcu_read_lock();
  3201. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3202. cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  3203. perf_event_comm_ctx(&cpuctx->ctx, comm_event);
  3204. ctxn = pmu->task_ctx_nr;
  3205. if (ctxn < 0)
  3206. continue;
  3207. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3208. if (ctx)
  3209. perf_event_comm_ctx(ctx, comm_event);
  3210. }
  3211. rcu_read_unlock();
  3212. }
  3213. void perf_event_comm(struct task_struct *task)
  3214. {
  3215. struct perf_comm_event comm_event;
  3216. struct perf_event_context *ctx;
  3217. int ctxn;
  3218. for_each_task_context_nr(ctxn) {
  3219. ctx = task->perf_event_ctxp[ctxn];
  3220. if (!ctx)
  3221. continue;
  3222. perf_event_enable_on_exec(ctx);
  3223. }
  3224. if (!atomic_read(&nr_comm_events))
  3225. return;
  3226. comm_event = (struct perf_comm_event){
  3227. .task = task,
  3228. /* .comm */
  3229. /* .comm_size */
  3230. .event_id = {
  3231. .header = {
  3232. .type = PERF_RECORD_COMM,
  3233. .misc = 0,
  3234. /* .size */
  3235. },
  3236. /* .pid */
  3237. /* .tid */
  3238. },
  3239. };
  3240. perf_event_comm_event(&comm_event);
  3241. }
  3242. /*
  3243. * mmap tracking
  3244. */
  3245. struct perf_mmap_event {
  3246. struct vm_area_struct *vma;
  3247. const char *file_name;
  3248. int file_size;
  3249. struct {
  3250. struct perf_event_header header;
  3251. u32 pid;
  3252. u32 tid;
  3253. u64 start;
  3254. u64 len;
  3255. u64 pgoff;
  3256. } event_id;
  3257. };
  3258. static void perf_event_mmap_output(struct perf_event *event,
  3259. struct perf_mmap_event *mmap_event)
  3260. {
  3261. struct perf_output_handle handle;
  3262. int size = mmap_event->event_id.header.size;
  3263. int ret = perf_output_begin(&handle, event, size, 0, 0);
  3264. if (ret)
  3265. return;
  3266. mmap_event->event_id.pid = perf_event_pid(event, current);
  3267. mmap_event->event_id.tid = perf_event_tid(event, current);
  3268. perf_output_put(&handle, mmap_event->event_id);
  3269. perf_output_copy(&handle, mmap_event->file_name,
  3270. mmap_event->file_size);
  3271. perf_output_end(&handle);
  3272. }
  3273. static int perf_event_mmap_match(struct perf_event *event,
  3274. struct perf_mmap_event *mmap_event,
  3275. int executable)
  3276. {
  3277. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3278. return 0;
  3279. if (event->cpu != -1 && event->cpu != smp_processor_id())
  3280. return 0;
  3281. if ((!executable && event->attr.mmap_data) ||
  3282. (executable && event->attr.mmap))
  3283. return 1;
  3284. return 0;
  3285. }
  3286. static void perf_event_mmap_ctx(struct perf_event_context *ctx,
  3287. struct perf_mmap_event *mmap_event,
  3288. int executable)
  3289. {
  3290. struct perf_event *event;
  3291. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3292. if (perf_event_mmap_match(event, mmap_event, executable))
  3293. perf_event_mmap_output(event, mmap_event);
  3294. }
  3295. }
  3296. static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
  3297. {
  3298. struct perf_cpu_context *cpuctx;
  3299. struct perf_event_context *ctx;
  3300. struct vm_area_struct *vma = mmap_event->vma;
  3301. struct file *file = vma->vm_file;
  3302. unsigned int size;
  3303. char tmp[16];
  3304. char *buf = NULL;
  3305. const char *name;
  3306. struct pmu *pmu;
  3307. int ctxn;
  3308. memset(tmp, 0, sizeof(tmp));
  3309. if (file) {
  3310. /*
  3311. * d_path works from the end of the buffer backwards, so we
  3312. * need to add enough zero bytes after the string to handle
  3313. * the 64bit alignment we do later.
  3314. */
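/*
 * For example, a path such as "/lib/ld.so.1" needs 13 bytes including its
 * NUL, which the ALIGN() below rounds up to 16; the extra bytes copied out
 * past the NUL must already be zero, hence kzalloc() and the sizeof(u64)
 * of slack.
 */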
  3315. buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
  3316. if (!buf) {
  3317. name = strncpy(tmp, "//enomem", sizeof(tmp));
  3318. goto got_name;
  3319. }
  3320. name = d_path(&file->f_path, buf, PATH_MAX);
  3321. if (IS_ERR(name)) {
  3322. name = strncpy(tmp, "//toolong", sizeof(tmp));
  3323. goto got_name;
  3324. }
  3325. } else {
  3326. if (arch_vma_name(mmap_event->vma)) {
  3327. name = strncpy(tmp, arch_vma_name(mmap_event->vma),
  3328. sizeof(tmp));
  3329. goto got_name;
  3330. }
  3331. if (!vma->vm_mm) {
  3332. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  3333. goto got_name;
  3334. } else if (vma->vm_start <= vma->vm_mm->start_brk &&
  3335. vma->vm_end >= vma->vm_mm->brk) {
  3336. name = strncpy(tmp, "[heap]", sizeof(tmp));
  3337. goto got_name;
  3338. } else if (vma->vm_start <= vma->vm_mm->start_stack &&
  3339. vma->vm_end >= vma->vm_mm->start_stack) {
  3340. name = strncpy(tmp, "[stack]", sizeof(tmp));
  3341. goto got_name;
  3342. }
  3343. name = strncpy(tmp, "//anon", sizeof(tmp));
  3344. goto got_name;
  3345. }
  3346. got_name:
  3347. size = ALIGN(strlen(name)+1, sizeof(u64));
  3348. mmap_event->file_name = name;
  3349. mmap_event->file_size = size;
  3350. mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  3351. rcu_read_lock();
  3352. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3353. cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  3354. perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
  3355. vma->vm_flags & VM_EXEC);
  3356. ctxn = pmu->task_ctx_nr;
  3357. if (ctxn < 0)
  3358. continue;
  3359. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3360. if (ctx) {
  3361. perf_event_mmap_ctx(ctx, mmap_event,
  3362. vma->vm_flags & VM_EXEC);
  3363. }
  3364. }
  3365. rcu_read_unlock();
  3366. kfree(buf);
  3367. }
  3368. void perf_event_mmap(struct vm_area_struct *vma)
  3369. {
  3370. struct perf_mmap_event mmap_event;
  3371. if (!atomic_read(&nr_mmap_events))
  3372. return;
  3373. mmap_event = (struct perf_mmap_event){
  3374. .vma = vma,
  3375. /* .file_name */
  3376. /* .file_size */
  3377. .event_id = {
  3378. .header = {
  3379. .type = PERF_RECORD_MMAP,
  3380. .misc = PERF_RECORD_MISC_USER,
  3381. /* .size */
  3382. },
  3383. /* .pid */
  3384. /* .tid */
  3385. .start = vma->vm_start,
  3386. .len = vma->vm_end - vma->vm_start,
  3387. .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
  3388. },
  3389. };
  3390. perf_event_mmap_event(&mmap_event);
  3391. }
  3392. /*
  3393. * IRQ throttle logging
  3394. */
  3395. static void perf_log_throttle(struct perf_event *event, int enable)
  3396. {
  3397. struct perf_output_handle handle;
  3398. int ret;
  3399. struct {
  3400. struct perf_event_header header;
  3401. u64 time;
  3402. u64 id;
  3403. u64 stream_id;
  3404. } throttle_event = {
  3405. .header = {
  3406. .type = PERF_RECORD_THROTTLE,
  3407. .misc = 0,
  3408. .size = sizeof(throttle_event),
  3409. },
  3410. .time = perf_clock(),
  3411. .id = primary_event_id(event),
  3412. .stream_id = event->id,
  3413. };
  3414. if (enable)
  3415. throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
  3416. ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
  3417. if (ret)
  3418. return;
  3419. perf_output_put(&handle, throttle_event);
  3420. perf_output_end(&handle);
  3421. }
  3422. /*
  3423. * Generic event overflow handling, sampling.
  3424. */
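/*
 * __perf_event_overflow() throttles an event once it has taken more than
 * sysctl_perf_event_sample_rate / HZ interrupts since the count was last
 * reset (roughly once per tick).  For example, assuming HZ == 1000 and a
 * sample rate limit of 100000, the 101st interrupt in a tick marks the
 * event with MAX_INTERRUPTS and logs a PERF_RECORD_THROTTLE.
 */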
  3425. static int __perf_event_overflow(struct perf_event *event, int nmi,
  3426. int throttle, struct perf_sample_data *data,
  3427. struct pt_regs *regs)
  3428. {
  3429. int events = atomic_read(&event->event_limit);
  3430. struct hw_perf_event *hwc = &event->hw;
  3431. int ret = 0;
  3432. if (!throttle) {
  3433. hwc->interrupts++;
  3434. } else {
  3435. if (hwc->interrupts != MAX_INTERRUPTS) {
  3436. hwc->interrupts++;
  3437. if (HZ * hwc->interrupts >
  3438. (u64)sysctl_perf_event_sample_rate) {
  3439. hwc->interrupts = MAX_INTERRUPTS;
  3440. perf_log_throttle(event, 0);
  3441. ret = 1;
  3442. }
  3443. } else {
  3444. /*
  3445. * Keep re-disabling events even though on the previous
  3446. * pass we disabled it - just in case we raced with a
  3447. * sched-in and the event got enabled again:
  3448. */
  3449. ret = 1;
  3450. }
  3451. }
  3452. if (event->attr.freq) {
  3453. u64 now = perf_clock();
  3454. s64 delta = now - hwc->freq_time_stamp;
  3455. hwc->freq_time_stamp = now;
  3456. if (delta > 0 && delta < 2*TICK_NSEC)
  3457. perf_adjust_period(event, delta, hwc->last_period);
  3458. }
  3459. /*
  3460. * XXX event_limit might not quite work as expected on inherited
  3461. * events
  3462. */
  3463. event->pending_kill = POLL_IN;
  3464. if (events && atomic_dec_and_test(&event->event_limit)) {
  3465. ret = 1;
  3466. event->pending_kill = POLL_HUP;
  3467. if (nmi) {
  3468. event->pending_disable = 1;
  3469. perf_pending_queue(&event->pending,
  3470. perf_pending_event);
  3471. } else
  3472. perf_event_disable(event);
  3473. }
  3474. if (event->overflow_handler)
  3475. event->overflow_handler(event, nmi, data, regs);
  3476. else
  3477. perf_event_output(event, nmi, data, regs);
  3478. return ret;
  3479. }
  3480. int perf_event_overflow(struct perf_event *event, int nmi,
  3481. struct perf_sample_data *data,
  3482. struct pt_regs *regs)
  3483. {
  3484. return __perf_event_overflow(event, nmi, 1, data, regs);
  3485. }
  3486. /*
  3487. * Generic software event infrastructure
  3488. */
  3489. struct swevent_htable {
  3490. struct swevent_hlist *swevent_hlist;
  3491. struct mutex hlist_mutex;
  3492. int hlist_refcount;
  3493. /* Recursion avoidance in each contexts */
  3494. int recursion[PERF_NR_CONTEXTS];
  3495. };
  3496. static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
  3497. /*
  3498. * We directly increment event->count and keep a second value in
3499. * event->hw.period_left to count intervals. This period value
3500. * is kept in the range [-sample_period, 0] so that we can use the
3501. * sign as a trigger.
  3502. */
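/*
 * Worked example, assuming sample_period == 4: period_left sits at -4 after
 * perf_swevent_set_period(), perf_swevent_event() adds each count to it,
 * and only once it reaches zero or above does perf_swevent_overflow() run;
 * set_period() then works out how many whole periods elapsed (nr) and pulls
 * period_left back down by nr * period.
 */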
  3503. static u64 perf_swevent_set_period(struct perf_event *event)
  3504. {
  3505. struct hw_perf_event *hwc = &event->hw;
  3506. u64 period = hwc->last_period;
  3507. u64 nr, offset;
  3508. s64 old, val;
  3509. hwc->last_period = hwc->sample_period;
  3510. again:
  3511. old = val = local64_read(&hwc->period_left);
  3512. if (val < 0)
  3513. return 0;
  3514. nr = div64_u64(period + val, period);
  3515. offset = nr * period;
  3516. val -= offset;
  3517. if (local64_cmpxchg(&hwc->period_left, old, val) != old)
  3518. goto again;
  3519. return nr;
  3520. }
  3521. static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
  3522. int nmi, struct perf_sample_data *data,
  3523. struct pt_regs *regs)
  3524. {
  3525. struct hw_perf_event *hwc = &event->hw;
  3526. int throttle = 0;
  3527. data->period = event->hw.last_period;
  3528. if (!overflow)
  3529. overflow = perf_swevent_set_period(event);
  3530. if (hwc->interrupts == MAX_INTERRUPTS)
  3531. return;
  3532. for (; overflow; overflow--) {
  3533. if (__perf_event_overflow(event, nmi, throttle,
  3534. data, regs)) {
  3535. /*
  3536. * We inhibit the overflow from happening when
  3537. * hwc->interrupts == MAX_INTERRUPTS.
  3538. */
  3539. break;
  3540. }
  3541. throttle = 1;
  3542. }
  3543. }
  3544. static void perf_swevent_event(struct perf_event *event, u64 nr,
  3545. int nmi, struct perf_sample_data *data,
  3546. struct pt_regs *regs)
  3547. {
  3548. struct hw_perf_event *hwc = &event->hw;
  3549. local64_add(nr, &event->count);
  3550. if (!regs)
  3551. return;
  3552. if (!hwc->sample_period)
  3553. return;
  3554. if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
  3555. return perf_swevent_overflow(event, 1, nmi, data, regs);
  3556. if (local64_add_negative(nr, &hwc->period_left))
  3557. return;
  3558. perf_swevent_overflow(event, 0, nmi, data, regs);
  3559. }
  3560. static int perf_exclude_event(struct perf_event *event,
  3561. struct pt_regs *regs)
  3562. {
  3563. if (event->hw.state & PERF_HES_STOPPED)
  3564. return 0;
  3565. if (regs) {
  3566. if (event->attr.exclude_user && user_mode(regs))
  3567. return 1;
  3568. if (event->attr.exclude_kernel && !user_mode(regs))
  3569. return 1;
  3570. }
  3571. return 0;
  3572. }
  3573. static int perf_swevent_match(struct perf_event *event,
  3574. enum perf_type_id type,
  3575. u32 event_id,
  3576. struct perf_sample_data *data,
  3577. struct pt_regs *regs)
  3578. {
  3579. if (event->attr.type != type)
  3580. return 0;
  3581. if (event->attr.config != event_id)
  3582. return 0;
  3583. if (perf_exclude_event(event, regs))
  3584. return 0;
  3585. return 1;
  3586. }
  3587. static inline u64 swevent_hash(u64 type, u32 event_id)
  3588. {
  3589. u64 val = event_id | (type << 32);
  3590. return hash_64(val, SWEVENT_HLIST_BITS);
  3591. }
  3592. static inline struct hlist_head *
  3593. __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
  3594. {
  3595. u64 hash = swevent_hash(type, event_id);
  3596. return &hlist->heads[hash];
  3597. }
  3598. /* For the read side: events when they trigger */
  3599. static inline struct hlist_head *
  3600. find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
  3601. {
  3602. struct swevent_hlist *hlist;
  3603. hlist = rcu_dereference(swhash->swevent_hlist);
  3604. if (!hlist)
  3605. return NULL;
  3606. return __find_swevent_head(hlist, type, event_id);
  3607. }
  3608. /* For the event head insertion and removal in the hlist */
  3609. static inline struct hlist_head *
  3610. find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
  3611. {
  3612. struct swevent_hlist *hlist;
  3613. u32 event_id = event->attr.config;
  3614. u64 type = event->attr.type;
  3615. /*
  3616. * Event scheduling is always serialized against hlist allocation
3617. * and release, which makes the protected version suitable here.
  3618. * The context lock guarantees that.
  3619. */
  3620. hlist = rcu_dereference_protected(swhash->swevent_hlist,
  3621. lockdep_is_held(&event->ctx->lock));
  3622. if (!hlist)
  3623. return NULL;
  3624. return __find_swevent_head(hlist, type, event_id);
  3625. }
  3626. static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  3627. u64 nr, int nmi,
  3628. struct perf_sample_data *data,
  3629. struct pt_regs *regs)
  3630. {
  3631. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3632. struct perf_event *event;
  3633. struct hlist_node *node;
  3634. struct hlist_head *head;
  3635. rcu_read_lock();
  3636. head = find_swevent_head_rcu(swhash, type, event_id);
  3637. if (!head)
  3638. goto end;
  3639. hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
  3640. if (perf_swevent_match(event, type, event_id, data, regs))
  3641. perf_swevent_event(event, nr, nmi, data, regs);
  3642. }
  3643. end:
  3644. rcu_read_unlock();
  3645. }
  3646. int perf_swevent_get_recursion_context(void)
  3647. {
  3648. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3649. return get_recursion_context(swhash->recursion);
  3650. }
  3651. EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
3652. inline void perf_swevent_put_recursion_context(int rctx)
  3653. {
  3654. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3655. put_recursion_context(swhash->recursion, rctx);
  3656. }
  3657. void __perf_sw_event(u32 event_id, u64 nr, int nmi,
  3658. struct pt_regs *regs, u64 addr)
  3659. {
  3660. struct perf_sample_data data;
  3661. int rctx;
  3662. preempt_disable_notrace();
  3663. rctx = perf_swevent_get_recursion_context();
  3664. if (rctx < 0)
  3665. return;
  3666. perf_sample_data_init(&data, addr);
  3667. do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
  3668. perf_swevent_put_recursion_context(rctx);
  3669. preempt_enable_notrace();
  3670. }
  3671. static void perf_swevent_read(struct perf_event *event)
  3672. {
  3673. }
  3674. static int perf_swevent_add(struct perf_event *event, int flags)
  3675. {
  3676. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3677. struct hw_perf_event *hwc = &event->hw;
  3678. struct hlist_head *head;
  3679. if (hwc->sample_period) {
  3680. hwc->last_period = hwc->sample_period;
  3681. perf_swevent_set_period(event);
  3682. }
  3683. hwc->state = !(flags & PERF_EF_START);
  3684. head = find_swevent_head(swhash, event);
  3685. if (WARN_ON_ONCE(!head))
  3686. return -EINVAL;
  3687. hlist_add_head_rcu(&event->hlist_entry, head);
  3688. return 0;
  3689. }
  3690. static void perf_swevent_del(struct perf_event *event, int flags)
  3691. {
  3692. hlist_del_rcu(&event->hlist_entry);
  3693. }
  3694. static void perf_swevent_start(struct perf_event *event, int flags)
  3695. {
  3696. event->hw.state = 0;
  3697. }
  3698. static void perf_swevent_stop(struct perf_event *event, int flags)
  3699. {
  3700. event->hw.state = PERF_HES_STOPPED;
  3701. }
  3702. /* Deref the hlist from the update side */
  3703. static inline struct swevent_hlist *
  3704. swevent_hlist_deref(struct swevent_htable *swhash)
  3705. {
  3706. return rcu_dereference_protected(swhash->swevent_hlist,
  3707. lockdep_is_held(&swhash->hlist_mutex));
  3708. }
  3709. static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
  3710. {
  3711. struct swevent_hlist *hlist;
  3712. hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
  3713. kfree(hlist);
  3714. }
  3715. static void swevent_hlist_release(struct swevent_htable *swhash)
  3716. {
  3717. struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
  3718. if (!hlist)
  3719. return;
  3720. rcu_assign_pointer(swhash->swevent_hlist, NULL);
  3721. call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
  3722. }
  3723. static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
  3724. {
  3725. struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  3726. mutex_lock(&swhash->hlist_mutex);
  3727. if (!--swhash->hlist_refcount)
  3728. swevent_hlist_release(swhash);
  3729. mutex_unlock(&swhash->hlist_mutex);
  3730. }
  3731. static void swevent_hlist_put(struct perf_event *event)
  3732. {
  3733. int cpu;
  3734. if (event->cpu != -1) {
  3735. swevent_hlist_put_cpu(event, event->cpu);
  3736. return;
  3737. }
  3738. for_each_possible_cpu(cpu)
  3739. swevent_hlist_put_cpu(event, cpu);
  3740. }
  3741. static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
  3742. {
  3743. struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  3744. int err = 0;
  3745. mutex_lock(&swhash->hlist_mutex);
  3746. if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
  3747. struct swevent_hlist *hlist;
  3748. hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
  3749. if (!hlist) {
  3750. err = -ENOMEM;
  3751. goto exit;
  3752. }
  3753. rcu_assign_pointer(swhash->swevent_hlist, hlist);
  3754. }
  3755. swhash->hlist_refcount++;
  3756. exit:
  3757. mutex_unlock(&swhash->hlist_mutex);
  3758. return err;
  3759. }
  3760. static int swevent_hlist_get(struct perf_event *event)
  3761. {
  3762. int err;
  3763. int cpu, failed_cpu;
  3764. if (event->cpu != -1)
  3765. return swevent_hlist_get_cpu(event, event->cpu);
  3766. get_online_cpus();
  3767. for_each_possible_cpu(cpu) {
  3768. err = swevent_hlist_get_cpu(event, cpu);
  3769. if (err) {
  3770. failed_cpu = cpu;
  3771. goto fail;
  3772. }
  3773. }
  3774. put_online_cpus();
  3775. return 0;
  3776. fail:
  3777. for_each_possible_cpu(cpu) {
  3778. if (cpu == failed_cpu)
  3779. break;
  3780. swevent_hlist_put_cpu(event, cpu);
  3781. }
  3782. put_online_cpus();
  3783. return err;
  3784. }
  3785. atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
  3786. static void sw_perf_event_destroy(struct perf_event *event)
  3787. {
  3788. u64 event_id = event->attr.config;
  3789. WARN_ON(event->parent);
  3790. atomic_dec(&perf_swevent_enabled[event_id]);
  3791. swevent_hlist_put(event);
  3792. }
  3793. static int perf_swevent_init(struct perf_event *event)
  3794. {
  3795. int event_id = event->attr.config;
  3796. if (event->attr.type != PERF_TYPE_SOFTWARE)
  3797. return -ENOENT;
  3798. switch (event_id) {
  3799. case PERF_COUNT_SW_CPU_CLOCK:
  3800. case PERF_COUNT_SW_TASK_CLOCK:
  3801. return -ENOENT;
  3802. default:
  3803. break;
  3804. }
  3805. if (event_id > PERF_COUNT_SW_MAX)
  3806. return -ENOENT;
  3807. if (!event->parent) {
  3808. int err;
  3809. err = swevent_hlist_get(event);
  3810. if (err)
  3811. return err;
  3812. atomic_inc(&perf_swevent_enabled[event_id]);
  3813. event->destroy = sw_perf_event_destroy;
  3814. }
  3815. return 0;
  3816. }
  3817. static struct pmu perf_swevent = {
  3818. .task_ctx_nr = perf_sw_context,
  3819. .event_init = perf_swevent_init,
  3820. .add = perf_swevent_add,
  3821. .del = perf_swevent_del,
  3822. .start = perf_swevent_start,
  3823. .stop = perf_swevent_stop,
  3824. .read = perf_swevent_read,
  3825. };
  3826. #ifdef CONFIG_EVENT_TRACING
  3827. static int perf_tp_filter_match(struct perf_event *event,
  3828. struct perf_sample_data *data)
  3829. {
  3830. void *record = data->raw->data;
  3831. if (likely(!event->filter) || filter_match_preds(event->filter, record))
  3832. return 1;
  3833. return 0;
  3834. }
  3835. static int perf_tp_event_match(struct perf_event *event,
  3836. struct perf_sample_data *data,
  3837. struct pt_regs *regs)
  3838. {
  3839. /*
  3840. * All tracepoints are from kernel-space.
  3841. */
  3842. if (event->attr.exclude_kernel)
  3843. return 0;
  3844. if (!perf_tp_filter_match(event, data))
  3845. return 0;
  3846. return 1;
  3847. }
  3848. void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
  3849. struct pt_regs *regs, struct hlist_head *head, int rctx)
  3850. {
  3851. struct perf_sample_data data;
  3852. struct perf_event *event;
  3853. struct hlist_node *node;
  3854. struct perf_raw_record raw = {
  3855. .size = entry_size,
  3856. .data = record,
  3857. };
  3858. perf_sample_data_init(&data, addr);
  3859. data.raw = &raw;
  3860. hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
  3861. if (perf_tp_event_match(event, &data, regs))
  3862. perf_swevent_event(event, count, 1, &data, regs);
  3863. }
  3864. perf_swevent_put_recursion_context(rctx);
  3865. }
  3866. EXPORT_SYMBOL_GPL(perf_tp_event);
  3867. static void tp_perf_event_destroy(struct perf_event *event)
  3868. {
  3869. perf_trace_destroy(event);
  3870. }
  3871. static int perf_tp_event_init(struct perf_event *event)
  3872. {
  3873. int err;
  3874. if (event->attr.type != PERF_TYPE_TRACEPOINT)
  3875. return -ENOENT;
  3876. /*
3877. * Raw tracepoint data is a severe data leak; only allow root to
3878. * have these.
  3879. */
  3880. if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
  3881. perf_paranoid_tracepoint_raw() &&
  3882. !capable(CAP_SYS_ADMIN))
  3883. return -EPERM;
  3884. err = perf_trace_init(event);
  3885. if (err)
  3886. return err;
  3887. event->destroy = tp_perf_event_destroy;
  3888. return 0;
  3889. }
  3890. static struct pmu perf_tracepoint = {
  3891. .task_ctx_nr = perf_sw_context,
  3892. .event_init = perf_tp_event_init,
  3893. .add = perf_trace_add,
  3894. .del = perf_trace_del,
  3895. .start = perf_swevent_start,
  3896. .stop = perf_swevent_stop,
  3897. .read = perf_swevent_read,
  3898. };
  3899. static inline void perf_tp_register(void)
  3900. {
  3901. perf_pmu_register(&perf_tracepoint);
  3902. }
  3903. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  3904. {
  3905. char *filter_str;
  3906. int ret;
  3907. if (event->attr.type != PERF_TYPE_TRACEPOINT)
  3908. return -EINVAL;
  3909. filter_str = strndup_user(arg, PAGE_SIZE);
  3910. if (IS_ERR(filter_str))
  3911. return PTR_ERR(filter_str);
  3912. ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
  3913. kfree(filter_str);
  3914. return ret;
  3915. }
  3916. static void perf_event_free_filter(struct perf_event *event)
  3917. {
  3918. ftrace_profile_free_filter(event);
  3919. }
  3920. #else
  3921. static inline void perf_tp_register(void)
  3922. {
  3923. }
  3924. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  3925. {
  3926. return -ENOENT;
  3927. }
  3928. static void perf_event_free_filter(struct perf_event *event)
  3929. {
  3930. }
  3931. #endif /* CONFIG_EVENT_TRACING */
  3932. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  3933. void perf_bp_event(struct perf_event *bp, void *data)
  3934. {
  3935. struct perf_sample_data sample;
  3936. struct pt_regs *regs = data;
  3937. perf_sample_data_init(&sample, bp->attr.bp_addr);
  3938. if (!bp->hw.state && !perf_exclude_event(bp, regs))
  3939. perf_swevent_event(bp, 1, 1, &sample, regs);
  3940. }
  3941. #endif
  3942. /*
  3943. * hrtimer based swevent callback
  3944. */
  3945. static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  3946. {
  3947. enum hrtimer_restart ret = HRTIMER_RESTART;
  3948. struct perf_sample_data data;
  3949. struct pt_regs *regs;
  3950. struct perf_event *event;
  3951. u64 period;
  3952. event = container_of(hrtimer, struct perf_event, hw.hrtimer);
  3953. event->pmu->read(event);
  3954. perf_sample_data_init(&data, 0);
  3955. data.period = event->hw.last_period;
  3956. regs = get_irq_regs();
  3957. if (regs && !perf_exclude_event(event, regs)) {
  3958. if (!(event->attr.exclude_idle && current->pid == 0))
  3959. if (perf_event_overflow(event, 0, &data, regs))
  3960. ret = HRTIMER_NORESTART;
  3961. }
  3962. period = max_t(u64, 10000, event->hw.sample_period);
  3963. hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  3964. return ret;
  3965. }
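/*
 * (Re)arm the hrtimer backing a sampling software event: any positive
 * remainder left by perf_swevent_cancel_hrtimer() is used as-is, otherwise
 * the period falls back to at least 10000ns so the timer cannot re-fire at
 * an unbounded rate.
 */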
  3966. static void perf_swevent_start_hrtimer(struct perf_event *event)
  3967. {
  3968. struct hw_perf_event *hwc = &event->hw;
  3969. hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  3970. hwc->hrtimer.function = perf_swevent_hrtimer;
  3971. if (hwc->sample_period) {
  3972. s64 period = local64_read(&hwc->period_left);
  3973. if (period) {
  3974. if (period < 0)
  3975. period = 10000;
  3976. local64_set(&hwc->period_left, 0);
  3977. } else {
  3978. period = max_t(u64, 10000, hwc->sample_period);
  3979. }
  3980. __hrtimer_start_range_ns(&hwc->hrtimer,
  3981. ns_to_ktime(period), 0,
  3982. HRTIMER_MODE_REL_PINNED, 0);
  3983. }
  3984. }
  3985. static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  3986. {
  3987. struct hw_perf_event *hwc = &event->hw;
  3988. if (hwc->sample_period) {
  3989. ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
  3990. local64_set(&hwc->period_left, ktime_to_ns(remaining));
  3991. hrtimer_cancel(&hwc->hrtimer);
  3992. }
  3993. }
  3994. /*
  3995. * Software event: cpu wall time clock
  3996. */
  3997. static void cpu_clock_event_update(struct perf_event *event)
  3998. {
  3999. s64 prev;
  4000. u64 now;
  4001. now = local_clock();
  4002. prev = local64_xchg(&event->hw.prev_count, now);
  4003. local64_add(now - prev, &event->count);
  4004. }
  4005. static void cpu_clock_event_start(struct perf_event *event, int flags)
  4006. {
  4007. local64_set(&event->hw.prev_count, local_clock());
  4008. perf_swevent_start_hrtimer(event);
  4009. }
  4010. static void cpu_clock_event_stop(struct perf_event *event, int flags)
  4011. {
  4012. perf_swevent_cancel_hrtimer(event);
  4013. cpu_clock_event_update(event);
  4014. }
  4015. static int cpu_clock_event_add(struct perf_event *event, int flags)
  4016. {
  4017. if (flags & PERF_EF_START)
  4018. cpu_clock_event_start(event, flags);
  4019. return 0;
  4020. }
  4021. static void cpu_clock_event_del(struct perf_event *event, int flags)
  4022. {
  4023. cpu_clock_event_stop(event, flags);
  4024. }
  4025. static void cpu_clock_event_read(struct perf_event *event)
  4026. {
  4027. cpu_clock_event_update(event);
  4028. }
  4029. static int cpu_clock_event_init(struct perf_event *event)
  4030. {
  4031. if (event->attr.type != PERF_TYPE_SOFTWARE)
  4032. return -ENOENT;
  4033. if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
  4034. return -ENOENT;
  4035. return 0;
  4036. }
  4037. static struct pmu perf_cpu_clock = {
  4038. .task_ctx_nr = perf_sw_context,
  4039. .event_init = cpu_clock_event_init,
  4040. .add = cpu_clock_event_add,
  4041. .del = cpu_clock_event_del,
  4042. .start = cpu_clock_event_start,
  4043. .stop = cpu_clock_event_stop,
  4044. .read = cpu_clock_event_read,
  4045. };
  4046. /*
  4047. * Software event: task time clock
  4048. */
  4049. static void task_clock_event_update(struct perf_event *event, u64 now)
  4050. {
  4051. u64 prev;
  4052. s64 delta;
  4053. prev = local64_xchg(&event->hw.prev_count, now);
  4054. delta = now - prev;
  4055. local64_add(delta, &event->count);
  4056. }
  4057. static void task_clock_event_start(struct perf_event *event, int flags)
  4058. {
  4059. local64_set(&event->hw.prev_count, event->ctx->time);
  4060. perf_swevent_start_hrtimer(event);
  4061. }
  4062. static void task_clock_event_stop(struct perf_event *event, int flags)
  4063. {
  4064. perf_swevent_cancel_hrtimer(event);
  4065. task_clock_event_update(event, event->ctx->time);
  4066. }
  4067. static int task_clock_event_add(struct perf_event *event, int flags)
  4068. {
  4069. if (flags & PERF_EF_START)
  4070. task_clock_event_start(event, flags);
  4071. return 0;
  4072. }
  4073. static void task_clock_event_del(struct perf_event *event, int flags)
  4074. {
  4075. task_clock_event_stop(event, PERF_EF_UPDATE);
  4076. }
  4077. static void task_clock_event_read(struct perf_event *event)
  4078. {
  4079. u64 time;
  4080. if (!in_nmi()) {
  4081. update_context_time(event->ctx);
  4082. time = event->ctx->time;
  4083. } else {
  4084. u64 now = perf_clock();
  4085. u64 delta = now - event->ctx->timestamp;
  4086. time = event->ctx->time + delta;
  4087. }
  4088. task_clock_event_update(event, time);
  4089. }
  4090. static int task_clock_event_init(struct perf_event *event)
  4091. {
  4092. if (event->attr.type != PERF_TYPE_SOFTWARE)
  4093. return -ENOENT;
  4094. if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
  4095. return -ENOENT;
  4096. return 0;
  4097. }
  4098. static struct pmu perf_task_clock = {
  4099. .task_ctx_nr = perf_sw_context,
  4100. .event_init = task_clock_event_init,
  4101. .add = task_clock_event_add,
  4102. .del = task_clock_event_del,
  4103. .start = task_clock_event_start,
  4104. .stop = task_clock_event_stop,
  4105. .read = task_clock_event_read,
  4106. };
  4107. static void perf_pmu_nop_void(struct pmu *pmu)
  4108. {
  4109. }
  4110. static int perf_pmu_nop_int(struct pmu *pmu)
  4111. {
  4112. return 0;
  4113. }
  4114. static void perf_pmu_start_txn(struct pmu *pmu)
  4115. {
  4116. perf_pmu_disable(pmu);
  4117. }
  4118. static int perf_pmu_commit_txn(struct pmu *pmu)
  4119. {
  4120. perf_pmu_enable(pmu);
  4121. return 0;
  4122. }
  4123. static void perf_pmu_cancel_txn(struct pmu *pmu)
  4124. {
  4125. perf_pmu_enable(pmu);
  4126. }
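/*
 * These default transaction helpers just bracket a batch of ->add() calls
 * with pmu_disable()/pmu_enable().  Sketch of how a caller is expected to
 * drive them when scheduling an event group:
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START))
 *			goto fail;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;
 * fail:
 *	pmu->cancel_txn(pmu);
 */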
  4127. /*
  4128. * Ensures all contexts with the same task_ctx_nr have the same
  4129. * pmu_cpu_context too.
  4130. */
  4131. static void *find_pmu_context(int ctxn)
  4132. {
  4133. struct pmu *pmu;
  4134. if (ctxn < 0)
  4135. return NULL;
  4136. list_for_each_entry(pmu, &pmus, entry) {
  4137. if (pmu->task_ctx_nr == ctxn)
  4138. return pmu->pmu_cpu_context;
  4139. }
  4140. return NULL;
  4141. }
  4142. static void free_pmu_context(void * __percpu cpu_context)
  4143. {
  4144. struct pmu *pmu;
  4145. mutex_lock(&pmus_lock);
  4146. /*
4147. * Like a (really lame) refcount: only free the per-cpu context once no registered pmu still points at it.
  4148. */
  4149. list_for_each_entry(pmu, &pmus, entry) {
  4150. if (pmu->pmu_cpu_context == cpu_context)
  4151. goto out;
  4152. }
  4153. free_percpu(cpu_context);
  4154. out:
  4155. mutex_unlock(&pmus_lock);
  4156. }
  4157. int perf_pmu_register(struct pmu *pmu)
  4158. {
  4159. int cpu, ret;
  4160. mutex_lock(&pmus_lock);
  4161. ret = -ENOMEM;
  4162. pmu->pmu_disable_count = alloc_percpu(int);
  4163. if (!pmu->pmu_disable_count)
  4164. goto unlock;
  4165. pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
  4166. if (pmu->pmu_cpu_context)
  4167. goto got_cpu_context;
  4168. pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
  4169. if (!pmu->pmu_cpu_context)
  4170. goto free_pdc;
  4171. for_each_possible_cpu(cpu) {
  4172. struct perf_cpu_context *cpuctx;
  4173. cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  4174. __perf_event_init_context(&cpuctx->ctx);
  4175. cpuctx->ctx.pmu = pmu;
  4176. cpuctx->timer_interval = TICK_NSEC;
  4177. hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  4178. cpuctx->timer.function = perf_event_context_tick;
  4179. }
  4180. got_cpu_context:
  4181. if (!pmu->start_txn) {
  4182. if (pmu->pmu_enable) {
  4183. /*
  4184. * If we have pmu_enable/pmu_disable calls, install
  4185. * transaction stubs that use that to try and batch
  4186. * hardware accesses.
  4187. */
  4188. pmu->start_txn = perf_pmu_start_txn;
  4189. pmu->commit_txn = perf_pmu_commit_txn;
  4190. pmu->cancel_txn = perf_pmu_cancel_txn;
  4191. } else {
  4192. pmu->start_txn = perf_pmu_nop_void;
  4193. pmu->commit_txn = perf_pmu_nop_int;
  4194. pmu->cancel_txn = perf_pmu_nop_void;
  4195. }
  4196. }
  4197. if (!pmu->pmu_enable) {
  4198. pmu->pmu_enable = perf_pmu_nop_void;
  4199. pmu->pmu_disable = perf_pmu_nop_void;
  4200. }
  4201. list_add_rcu(&pmu->entry, &pmus);
  4202. ret = 0;
  4203. unlock:
  4204. mutex_unlock(&pmus_lock);
  4205. return ret;
  4206. free_pdc:
  4207. free_percpu(pmu->pmu_disable_count);
  4208. goto unlock;
  4209. }
  4210. void perf_pmu_unregister(struct pmu *pmu)
  4211. {
  4212. mutex_lock(&pmus_lock);
  4213. list_del_rcu(&pmu->entry);
  4214. mutex_unlock(&pmus_lock);
  4215. /*
  4216. * We dereference the pmu list under both SRCU and regular RCU, so
  4217. * synchronize against both of those.
  4218. */
  4219. synchronize_srcu(&pmus_srcu);
  4220. synchronize_rcu();
  4221. free_percpu(pmu->pmu_disable_count);
  4222. free_pmu_context(pmu->pmu_cpu_context);
  4223. }
  4224. struct pmu *perf_init_event(struct perf_event *event)
  4225. {
  4226. struct pmu *pmu = NULL;
  4227. int idx;
  4228. idx = srcu_read_lock(&pmus_srcu);
  4229. list_for_each_entry_rcu(pmu, &pmus, entry) {
  4230. int ret = pmu->event_init(event);
  4231. if (!ret)
  4232. goto unlock;
  4233. if (ret != -ENOENT) {
  4234. pmu = ERR_PTR(ret);
  4235. goto unlock;
  4236. }
  4237. }
  4238. pmu = ERR_PTR(-ENOENT);
  4239. unlock:
  4240. srcu_read_unlock(&pmus_srcu, idx);
  4241. return pmu;
  4242. }
  4243. /*
4244. * Allocate and initialize an event structure
  4245. */
  4246. static struct perf_event *
  4247. perf_event_alloc(struct perf_event_attr *attr, int cpu,
  4248. struct perf_event *group_leader,
  4249. struct perf_event *parent_event,
  4250. perf_overflow_handler_t overflow_handler)
  4251. {
  4252. struct pmu *pmu;
  4253. struct perf_event *event;
  4254. struct hw_perf_event *hwc;
  4255. long err;
  4256. event = kzalloc(sizeof(*event), GFP_KERNEL);
  4257. if (!event)
  4258. return ERR_PTR(-ENOMEM);
  4259. /*
  4260. * Single events are their own group leaders, with an
  4261. * empty sibling list:
  4262. */
  4263. if (!group_leader)
  4264. group_leader = event;
  4265. mutex_init(&event->child_mutex);
  4266. INIT_LIST_HEAD(&event->child_list);
  4267. INIT_LIST_HEAD(&event->group_entry);
  4268. INIT_LIST_HEAD(&event->event_entry);
  4269. INIT_LIST_HEAD(&event->sibling_list);
  4270. init_waitqueue_head(&event->waitq);
  4271. mutex_init(&event->mmap_mutex);
  4272. event->cpu = cpu;
  4273. event->attr = *attr;
  4274. event->group_leader = group_leader;
  4275. event->pmu = NULL;
  4276. event->oncpu = -1;
  4277. event->parent = parent_event;
  4278. event->ns = get_pid_ns(current->nsproxy->pid_ns);
  4279. event->id = atomic64_inc_return(&perf_event_id);
  4280. event->state = PERF_EVENT_STATE_INACTIVE;
  4281. if (!overflow_handler && parent_event)
  4282. overflow_handler = parent_event->overflow_handler;
  4283. event->overflow_handler = overflow_handler;
  4284. if (attr->disabled)
  4285. event->state = PERF_EVENT_STATE_OFF;
  4286. pmu = NULL;
  4287. hwc = &event->hw;
  4288. hwc->sample_period = attr->sample_period;
  4289. if (attr->freq && attr->sample_freq)
  4290. hwc->sample_period = 1;
  4291. hwc->last_period = hwc->sample_period;
  4292. local64_set(&hwc->period_left, hwc->sample_period);
  4293. /*
  4294. * we currently do not support PERF_FORMAT_GROUP on inherited events
  4295. */
  4296. if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
  4297. goto done;
  4298. pmu = perf_init_event(event);
  4299. done:
  4300. err = 0;
  4301. if (!pmu)
  4302. err = -EINVAL;
  4303. else if (IS_ERR(pmu))
  4304. err = PTR_ERR(pmu);
  4305. if (err) {
  4306. if (event->ns)
  4307. put_pid_ns(event->ns);
  4308. kfree(event);
  4309. return ERR_PTR(err);
  4310. }
  4311. event->pmu = pmu;
  4312. if (!event->parent) {
  4313. atomic_inc(&nr_events);
  4314. if (event->attr.mmap || event->attr.mmap_data)
  4315. atomic_inc(&nr_mmap_events);
  4316. if (event->attr.comm)
  4317. atomic_inc(&nr_comm_events);
  4318. if (event->attr.task)
  4319. atomic_inc(&nr_task_events);
  4320. if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
  4321. err = get_callchain_buffers();
  4322. if (err) {
  4323. free_event(event);
  4324. return ERR_PTR(err);
  4325. }
  4326. }
  4327. }
  4328. return event;
  4329. }
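
/*
 * Copy a perf_event_attr from user space, coping with attrs that are
 * shorter (older ABI) or longer (newer user space) than the kernel's
 * structure, and reject attrs that set bits the kernel does not know about.
 */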
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
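
/*
 * Redirect the output of @event into the ring buffer of @output_event
 * (PERF_FLAG_FD_OUTPUT).  Both events must be on the same CPU; if the
 * target buffer is per-task (cpu == -1), both must share a context, and
 * the event being redirected must not have an active mmap().
 */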
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct perf_buffer *buffer = NULL, *old_buffer = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu buffer, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the buffer we want to redirect to */
		buffer = perf_buffer_get(output_event);
		if (!buffer)
			goto unlock;
	}

	old_buffer = event->buffer;
	rcu_assign_pointer(event->buffer, buffer);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_buffer)
		perf_buffer_put(old_buffer);
out:
	return ret;
}

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr: event_id type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *event, *group_leader = NULL, *output_event = NULL;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_fd;
	}

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_alloc;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;
	if ((pmu->task_ctx_nr == perf_sw_context) && group_leader)
		pmu = group_leader->pmu;

	if (pid != -1)
		task = find_lively_task_by_vpid(pid);

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_group_fd;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	put_ctx(ctx);
err_group_fd:
	fput_light(group_file, fput_needed);
err_alloc:
	free_event(event);
err_fd:
	put_unused_fd(event_fd);
	return err;
}

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: overflow callback (NULL for the default handler)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
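
/*
 * Fold a dying child event's count and times back into its parent event
 * and unlink it from the parent's child_list.  Drops the reference on the
 * parent's filp that inherit_event() took.
 */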
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}
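
/*
 * Detach one event from the exiting task's context.  Inherited events
 * (those with a parent) are synced back into the parent and freed here;
 * events without a parent are left for their file's release path.
 */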
static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}
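
/*
 * Tear down one of the exiting task's perf contexts: unschedule it, detach
 * it from the task, report PERF_RECORD_EXIT, and then dispose of every
 * event still attached to it.
 */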
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_event_ctxp[ctxn];
	__perf_event_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}
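
/*
 * Free an inherited event that was never exposed via a file descriptor:
 * unlink it from its parent, drop the parent's filp reference, detach it
 * from its group and context, and free it.
 */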
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}
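
/*
 * Sanity check: by the time the task structure is finally dropped, every
 * perf context should already have been detached by perf_event_exit_task();
 * warn if any are left behind.
 */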
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       group_leader, parent_event,
				       NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
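
/*
 * Inherit a whole group: the leader first, then each sibling as a member
 * of the new child group.
 */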
static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
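
/*
 * Consider one of the parent's group leaders for inheritance into the
 * child.  Non-inheritable events only clear *inherited_all; inheritable
 * ones get a child context allocated on demand and are then cloned group
 * by group.
 */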
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp[ctxn] = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}
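
/*
 * Early boot: set up the software event hash table mutex on every
 * possible CPU.
 */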
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
	}
}
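
/*
 * CPU bring-up: if software events already reference the hash list,
 * allocate one for the incoming CPU.
 */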
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
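
/*
 * CPU hot-unplug: stop rotation and remove every event still attached to
 * the outgoing CPU's contexts before the CPU goes away.
 */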
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &this_cpu_ptr(pmu->pmu_cpu_context)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
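
/*
 * Hotplug notifier: bring per-cpu perf state up before a CPU starts and
 * tear it down before a CPU is taken offline (or when bring-up fails).
 */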
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
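
/*
 * Boot-time initialization: set up per-cpu state, the pmus SRCU domain,
 * the built-in software/clock/tracepoint PMUs and the hotplug notifier.
 */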
void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent);
	perf_pmu_register(&perf_cpu_clock);
	perf_pmu_register(&perf_task_clock);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
}