/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
        struct task_struct *p;
        int (*func)(void *info);
        void *info;
        int ret;
};

static void remote_function(void *data)
{
        struct remote_function_call *tfc = data;
        struct task_struct *p = tfc->p;

        if (p) {
                tfc->ret = -EAGAIN;
                if (task_cpu(p) != smp_processor_id() || !task_curr(p))
                        return;
        }

        tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p: the task to evaluate
 * @func: the function to be called
 * @info: the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *          -ESRCH  - when the process isn't running
 *          -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p = p,
                .func = func,
                .info = info,
                .ret = -ESRCH, /* No such (running) process */
        };

        if (task_curr(p))
                smp_call_function_single(task_cpu(p), remote_function, &data, 1);

        return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func: the function to be called
 * @info: the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p = NULL,
                .func = func,
                .info = info,
                .ret = -ENXIO, /* No such CPU */
        };

        smp_call_function_single(cpu, remote_function, &data, 1);

        return data.ret;
}

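/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * caller using cpu_function_call() to run a small probe on a given CPU.
 * example_probe() and example_read_on_cpu() are made-up names, shown only
 * to illustrate the calling convention; the #if 0 guard keeps the sketch
 * out of the build.
 */
#if 0
static int example_probe(void *info)
{
        int *out = info;

        /* Runs on the target CPU, in IPI context. */
        *out = smp_processor_id();
        return 0;
}

static int example_read_on_cpu(int cpu)
{
        int where = -1;
        int ret = cpu_function_call(cpu, example_probe, &where);

        /* ret is -ENXIO if @cpu was offline, else example_probe()'s 0. */
        return ret ? ret : where;
}
#endif
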
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP)

enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

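/*
 * Editor's note: the helpers that gate on this value live in
 * include/linux/perf_event.h and are simple threshold checks. A minimal
 * sketch follows, to make the level semantics above concrete (the names
 * match the helpers in this kernel era, but verify against your tree):
 */
#if 0
static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}
#endif
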
/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
        DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

        return 0;
}

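/*
 * Usage note (editor's addition): this handler backs the
 * kernel.perf_event_max_sample_rate sysctl, so e.g.
 *
 *      echo 50000 > /proc/sys/kernel/perf_event_max_sample_rate
 *
 * recomputes max_samples_per_tick as DIV_ROUND_UP(50000, HZ).
 */
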
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
                             enum event_type_t event_type,
                             struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
        return "pmu";
}

static inline u64 perf_clock(void)
{
        return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
        return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                          struct perf_event_context *ctx)
{
        raw_spin_lock(&cpuctx->ctx.lock);
        if (ctx)
                raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                            struct perf_event_context *ctx)
{
        if (ctx)
                raw_spin_unlock(&ctx->lock);
        raw_spin_unlock(&cpuctx->ctx.lock);
}

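/*
 * Editor's note: these two helpers establish the lock order
 * cpuctx->ctx.lock -> ctx->lock, and callers bracket work on both
 * contexts with them, e.g. (as perf_cgroup_switch() does below):
 *
 *      perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *      ... operate on the cpu and task contexts ...
 *      perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 */
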
#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
        return container_of(task_subsys_state(task, perf_subsys_id),
                            struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

        return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
        css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
        css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
        perf_put_cgroup(event);
        event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
        return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
        struct perf_cgroup_info *t;

        t = per_cpu_ptr(event->cgrp->info, event->cpu);
        return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
        struct perf_cgroup_info *info;
        u64 now;

        now = perf_clock();
        info = this_cpu_ptr(cgrp->info);
        info->time += now - info->timestamp;
        info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
        struct perf_cgroup *cgrp_out = cpuctx->cgrp;

        if (cgrp_out)
                __update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
        struct perf_cgroup *cgrp;

        /*
         * ensure we access cgroup data only when needed and
         * when we know the cgroup is pinned (css_get)
         */
        if (!is_cgroup_event(event))
                return;

        cgrp = perf_cgroup_from_task(current);

        /*
         * Do not update time when cgroup is not active
         */
        if (cgrp == event->cgrp)
                __update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
                          struct perf_event_context *ctx)
{
        struct perf_cgroup *cgrp;
        struct perf_cgroup_info *info;

        /*
         * ctx->lock held by caller
         * ensure we do not access cgroup data
         * unless we have the cgroup pinned (css_get)
         */
        if (!task || !ctx->nr_cgroups)
                return;

        cgrp = perf_cgroup_from_task(task);
        info = this_cpu_ptr(cgrp->info);
        info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN  0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
        struct perf_cpu_context *cpuctx;
        struct pmu *pmu;
        unsigned long flags;

        /*
         * disable interrupts to avoid getting nr_cgroup
         * changes via __perf_event_disable(). Also
         * avoids preemption.
         */
        local_irq_save(flags);

        /*
         * we reschedule only in the presence of cgroup
         * constrained events.
         */
        rcu_read_lock();

        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

                /*
                 * perf_cgroup_events says at least one
                 * context on this CPU has cgroup events.
                 *
                 * ctx->nr_cgroups reports the number of cgroup
                 * events for a context.
                 */
                if (cpuctx->ctx.nr_cgroups > 0) {
                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
                        perf_pmu_disable(cpuctx->ctx.pmu);

                        if (mode & PERF_CGROUP_SWOUT) {
                                cpu_ctx_sched_out(cpuctx, EVENT_ALL);
                                /*
                                 * must not be done before ctxswout due
                                 * to event_filter_match() in event_sched_out()
                                 */
                                cpuctx->cgrp = NULL;
                        }

                        if (mode & PERF_CGROUP_SWIN) {
                                WARN_ON_ONCE(cpuctx->cgrp);
                                /* set cgrp before ctxsw in to
                                 * allow event_filter_match() to not
                                 * have to pass task around
                                 */
                                cpuctx->cgrp = perf_cgroup_from_task(task);
                                cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                        }
                        perf_pmu_enable(cpuctx->ctx.pmu);
                        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
                }
        }

        rcu_read_unlock();

        local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
                                         struct task_struct *next)
{
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;

        /*
         * we come here when we know perf_cgroup_events > 0
         */
        cgrp1 = perf_cgroup_from_task(task);

        /*
         * next is NULL when called from perf_event_enable_on_exec()
         * that will systematically cause a cgroup_switch()
         */
        if (next)
                cgrp2 = perf_cgroup_from_task(next);
        /*
         * only schedule out current cgroup events if we know
         * that we are switching to a different cgroup. Otherwise,
         * do not touch the cgroup events.
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
                                        struct task_struct *task)
{
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;

        /*
         * we come here when we know perf_cgroup_events > 0
         */
        cgrp1 = perf_cgroup_from_task(task);

        /* prev can never be NULL */
        cgrp2 = perf_cgroup_from_task(prev);
        /*
         * only need to schedule in cgroup events if we are changing
         * the cgroup during ctxsw. Cgroup events were not scheduled
         * out during ctxsw if that was not the case.
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
                                      struct perf_event_attr *attr,
                                      struct perf_event *group_leader)
{
        struct perf_cgroup *cgrp;
        struct cgroup_subsys_state *css;
        struct file *file;
        int ret = 0, fput_needed;

        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;

        css = cgroup_css_from_dir(file, perf_subsys_id);
        if (IS_ERR(css)) {
                ret = PTR_ERR(css);
                goto out;
        }

        cgrp = container_of(css, struct perf_cgroup, css);
        event->cgrp = cgrp;

        /* must be done before we fput() the file */
        perf_get_cgroup(event);

        /*
         * all events in a group must monitor
         * the same cgroup because a task belongs
         * to only one perf cgroup at a time
         */
        if (group_leader && group_leader->cgrp != cgrp) {
                perf_detach_cgroup(event);
                ret = -EINVAL;
        }
out:
        fput_light(file, fput_needed);
        return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
        struct perf_cgroup_info *t;

        t = per_cpu_ptr(event->cgrp->info, event->cpu);
        event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
        /*
         * when the current task's perf cgroup does not match
         * the event's, we need to remember to call the
         * perf_mark_enable() function the first time a task with
         * a matching perf cgroup is scheduled in.
         */
        if (is_cgroup_event(event) && !perf_cgroup_match(event))
                event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
                         struct perf_event_context *ctx)
{
        struct perf_event *sub;
        u64 tstamp = perf_event_time(event);

        if (!event->cgrp_defer_enabled)
                return;

        event->cgrp_defer_enabled = 0;

        event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
                if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled = tstamp - sub->total_time_enabled;
                        sub->cgrp_defer_enabled = 0;
                }
        }
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
        return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
        return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
        return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
                                         struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
                                        struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
                                      struct perf_event_attr *attr,
                                      struct perf_event *group_leader)
{
        return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
                          struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
        return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
                         struct perf_event_context *ctx)
{
}

#endif

void perf_pmu_disable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!(*count)++)
                pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!--(*count))
                pmu->pmu_enable(pmu);
}

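/*
 * Editor's note: disable/enable nest via the per-cpu count, so only the
 * outermost pair actually touches the hardware. The canonical bracket
 * pattern (see perf_cgroup_switch() above for a real instance):
 *
 *      perf_pmu_disable(pmu);
 *      ... reprogram events without intermediate PMU activity ...
 *      perf_pmu_enable(pmu);
 */
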
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct list_head *head = &__get_cpu_var(rotation_list);

        WARN_ON(!irqs_disabled());

        if (list_empty(&cpuctx->rotation_list))
                list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
        WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
        if (atomic_dec_and_test(&ctx->refcount)) {
                if (ctx->parent_ctx)
                        put_ctx(ctx->parent_ctx);
                if (ctx->task)
                        put_task_struct(ctx->task);
                kfree_rcu(ctx, rcu_head);
        }
}

static void unclone_ctx(struct perf_event_context *ctx)
{
        if (ctx->parent_ctx) {
                put_ctx(ctx->parent_ctx);
                ctx->parent_ctx = NULL;
        }
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
        /*
         * only top level events have the pid namespace they were created in
         */
        if (event->parent)
                event = event->parent;

        return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
        /*
         * only top level events have the pid namespace they were created in
         */
        if (event->parent)
                event = event->parent;

        return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
        u64 id = event->id;

        if (event->parent)
                id = event->parent->id;

        return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
        struct perf_event_context *ctx;

        rcu_read_lock();
retry:
        ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
                 * get swapped for another underneath us by
                 * perf_event_task_sched_out, though the
                 * rcu_read_lock() protects us from any context
                 * getting freed. Lock the context and check if it
                 * got swapped before we could get the lock, and retry
                 * if so. If we locked the right context, then it
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
                if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }

                if (!atomic_inc_not_zero(&ctx->refcount)) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        ctx = NULL;
                }
        }
        rcu_read_unlock();
        return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
        struct perf_event_context *ctx;
        unsigned long flags;

        ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
        return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ctx->lock, flags);
        --ctx->pin_count;
        raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

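/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * caller pinning a task context while inspecting it. example_count_events()
 * is a made-up name for illustration; the #if 0 guard keeps it out of the
 * build.
 */
#if 0
static int example_count_events(struct task_struct *task, int ctxn)
{
        struct perf_event_context *ctx;
        int nr = -ESRCH;

        ctx = perf_pin_task_context(task, ctxn);
        if (ctx) {
                /* ctx can be neither swapped nor freed while pinned. */
                nr = ctx->nr_events;
                perf_unpin_context(ctx);
                put_ctx(ctx); /* drop the reference taken while pinning */
        }
        return nr;
}
#endif
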
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
        u64 now = perf_clock();

        ctx->time += now - ctx->timestamp;
        ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;

        if (is_cgroup_event(event))
                return perf_cgroup_event_time(event);

        return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        u64 run_end;

        if (event->state < PERF_EVENT_STATE_INACTIVE ||
            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
                return;
        /*
         * in cgroup mode, time_enabled represents
         * the time the event was enabled AND active
         * tasks were in the monitored cgroup. This is
         * independent of the activity of the context as
         * there may be a mix of cgroup and non-cgroup events.
         *
         * That is why we treat cgroup events differently
         * here.
         */
        if (is_cgroup_event(event))
                run_end = perf_cgroup_event_time(event);
        else if (ctx->is_active)
                run_end = ctx->time;
        else
                run_end = event->tstamp_stopped;

        event->total_time_enabled = run_end - event->tstamp_enabled;

        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
                run_end = perf_event_time(event);

        event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
        struct perf_event *event;

        update_event_times(leader);
        list_for_each_entry(event, &leader->sibling_list, group_entry)
                update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
        if (event->attr.pinned)
                return &ctx->pinned_groups;
        else
                return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
        event->attach_state |= PERF_ATTACH_CONTEXT;

        /*
         * If we're a stand alone event or group leader, we go to the context
         * list, group events are kept attached to the group so that
         * perf_group_detach can, at all times, locate all siblings.
         */
        if (event->group_leader == event) {
                struct list_head *list;

                if (is_software_event(event))
                        event->group_flags |= PERF_GROUP_SOFTWARE;

                list = ctx_group_list(event, ctx);
                list_add_tail(&event->group_entry, list);
        }

        if (is_cgroup_event(event))
                ctx->nr_cgroups++;

        list_add_rcu(&event->event_entry, &ctx->event_list);
        if (!ctx->nr_events)
                perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
        int entry = sizeof(u64); /* value */
        int size = 0;
        int nr = 1;

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_GROUP) {
                nr += event->group_leader->nr_siblings;
                size += sizeof(u64);
        }

        size += entry * nr;
        event->read_size = size;
}

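/*
 * Worked example (editor's addition): for read_format =
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID on a leader with two siblings:
 *
 *      entry = 8 + 8 = 16      (value + id per event)
 *      nr    = 1 + 2 = 3       (leader plus siblings)
 *      size  = 8 + 3 * 16 = 56 (the group's nr field plus 3 entries)
 *
 * which matches the buffer a group read() fills in.
 */
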
static void perf_event__header_size(struct perf_event *event)
{
        struct perf_sample_data *data;
        u64 sample_type = event->attr.sample_type;
        u16 size = 0;

        perf_event__read_size(event);

        if (sample_type & PERF_SAMPLE_IP)
                size += sizeof(data->ip);

        if (sample_type & PERF_SAMPLE_ADDR)
                size += sizeof(data->addr);

        if (sample_type & PERF_SAMPLE_PERIOD)
                size += sizeof(data->period);

        if (sample_type & PERF_SAMPLE_READ)
                size += event->read_size;

        event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
        struct perf_sample_data *data;
        u64 sample_type = event->attr.sample_type;
        u16 size = 0;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid_entry);

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu_entry);

        event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
        struct perf_event *group_leader = event->group_leader, *pos;

        /*
         * We can have double attach due to group movement in perf_event_open.
         */
        if (event->attach_state & PERF_ATTACH_GROUP)
                return;

        event->attach_state |= PERF_ATTACH_GROUP;

        if (group_leader == event)
                return;

        if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
            !is_software_event(event))
                group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

        list_add_tail(&event->group_entry, &group_leader->sibling_list);
        group_leader->nr_siblings++;

        perf_event__header_size(group_leader);

        list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
                perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
        struct perf_cpu_context *cpuctx;

        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
        if (!(event->attach_state & PERF_ATTACH_CONTEXT))
                return;

        event->attach_state &= ~PERF_ATTACH_CONTEXT;

        if (is_cgroup_event(event)) {
                ctx->nr_cgroups--;
                cpuctx = __get_cpu_context(ctx);
                /*
                 * if there are no more cgroup events
                 * then clear cgrp to avoid stale pointer
                 * in update_cgrp_time_from_cpuctx()
                 */
                if (!ctx->nr_cgroups)
                        cpuctx->cgrp = NULL;
        }
        ctx->nr_events--;
        if (event->attr.inherit_stat)
                ctx->nr_stat--;

        list_del_rcu(&event->event_entry);

        if (event->group_leader == event)
                list_del_init(&event->group_entry);

        update_group_times(event);

        /*
         * If event was in error state, then keep it
         * that way, otherwise bogus counts will be
         * returned on read(). The only way to get out
         * of error state is by explicit re-enabling
         * of the event
         */
        if (event->state > PERF_EVENT_STATE_OFF)
                event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;

        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
        if (!(event->attach_state & PERF_ATTACH_GROUP))
                return;

        event->attach_state &= ~PERF_ATTACH_GROUP;

        /*
         * If this is a sibling, remove it from its group.
         */
        if (event->group_leader != event) {
                list_del_init(&event->group_entry);
                event->group_leader->nr_siblings--;
                goto out;
        }

        if (!list_empty(&event->group_entry))
                list = &event->group_entry;

        /*
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
         * to whatever list we are on.
         */
        list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
                if (list)
                        list_move_tail(&sibling->group_entry, list);
                sibling->group_leader = sibling;

                /* Inherit group flags from the previous leader */
                sibling->group_flags = event->group_flags;
        }

out:
        perf_event__header_size(event->group_leader);

        list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
                perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}
static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * Use the correct time source for the time snapshot.
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally;
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that the time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
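/*
 * Summary of the checks above (illustrative): a software-only group
 * always fits; an active exclusive group blocks every other hardware
 * group; an exclusive group itself only fits on an otherwise idle PMU;
 * anything else depends on whether the earlier groups in this
 * scheduling pass fit (can_add_hw).
 */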
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}
/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
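/*
 * Usage note (illustrative, not from the original source): userspace
 * typically reaches perf_event_refresh() via
 * ioctl(fd, PERF_EVENT_IOC_REFRESH, n), arming the event for n more
 * overflows; once event_limit drains to zero the event is disabled
 * again from the overflow path.
 */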
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;
	if (likely(!ctx->nr_events))
		return;

	update_context_time(ctx);
	update_cgrp_time_from_cpuctx(cpuctx);
	if (!ctx->nr_active)
		return;

	perf_pmu_disable(ctx->pmu);
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_pmu_enable(ctx->pmu);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}
static void __perf_event_sync_stat(struct perf_event *event,
				   struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				 struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		raw_spin_lock(&ctx->lock);
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
		raw_spin_unlock(&ctx->lock);
	}
}
#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * If cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * Cgroup events are system-wide mode only.
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);
}
static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	if (ctx->nr_events)
		cpuctx->task_ctx = ctx;

	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);

	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * If cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * Cgroup events are system-wide mode only.
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(prev, task);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
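/*
 * Worked example (illustrative): an event counted 1,000,000 events in
 * 10ms (nsec = 10,000,000) with sample_freq = 1000 Hz. The target
 * period is 1e6 * 1e9 / (1e7 * 1e3) = 100,000 events per sample, i.e.
 * overflowing every 100,000 events yields roughly 1000 samples/sec.
 */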
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		if (disable)
			event->pmu->stop(event, PERF_EF_UPDATE);

		local64_set(&hwc->period_left, 0);

		if (disable)
			event->pmu->start(event, PERF_EF_RELOAD);
	}
}
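/*
 * Worked example (illustrative): with hwc->sample_period = 100,000 and
 * a newly computed period of 108,000, delta = 8,000 and the low pass
 * filter applies only (8,000 + 7) / 8 = 1,000 of it, moving
 * sample_period to 101,000. Large corrections are thus spread over
 * several ticks instead of being applied all at once.
 */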
/*
 * Combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure that having freq events does not
 * change the rate of unthrottling, as that would introduce bias.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
					   int needs_unthr)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 now, period = TICK_NSEC;
	s64 delta;

	/*
	 * We only need to iterate over all events if:
	 * - the context has events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
	if (!(ctx->nr_freq || needs_unthr))
		return;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
			hwc->interrupts = 0;
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		/*
		 * Stop the event and update event->count.
		 */
		event->pmu->stop(event, PERF_EF_UPDATE);

		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		/*
		 * Restart the event, reloading only if the value has
		 * changed. We have already stopped the event, so tell
		 * perf_adjust_period() not to stop it a second time.
		 */
		if (delta > 0)
			perf_adjust_period(event, period, delta, false);

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
	}

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);
}
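/*
 * Illustrative effect of the rotation above: if flexible_groups holds
 * groups [A, B, C] and only one fits on the PMU at a time, successive
 * ticks see [A, B, C] -> [B, C, A] -> [C, A, B], so each group gets
 * PMU time in turn rather than A monopolizing it.
 */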
/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	if (!rotate)
		goto done;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_sched_in(cpuctx, ctx, current);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}
void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;
	struct perf_event_context *ctx;
	int throttled;

	WARN_ON(!irqs_disabled());

	__this_cpu_inc(perf_throttled_seq);
	throttled = __this_cpu_xchg(perf_throttled_count, 0);

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		ctx = &cpuctx->ctx;
		perf_adjust_freq_unthr_context(ctx, throttled);

		ctx = cpuctx->task_ctx;
		if (ctx)
			perf_adjust_freq_unthr_context(ctx, throttled);

		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event);

	return 1;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxsw in cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current, NULL);

	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);

	list_for_each_entry(event, &ctx->event_list, event_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	/*
	 * Also calls ctxsw in for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}
static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * We may read while the context is not active (e.g., the
		 * thread is blocked); in that case we cannot update the
		 * context time.
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}
/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
/*
 * Returns a matching context with refcount and pincount.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activating it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task(),
		 * we must see PF_EXITING; it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			get_ctx(ctx);
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_ctx(ctx);

			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void ring_buffer_put(struct ring_buffer *rb);

static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec_deferred(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			jump_label_dec_deferred(&perf_sched_events);
		}
	}

	if (event->rb) {
		ring_buffer_put(event->rb);
		event->rb = NULL;
	}

	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	raw_spin_unlock_irq(&ctx->lock);
	perf_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

	return perf_event_release_kernel(event);
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
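/*
 * Aggregation note (illustrative): the total returned above is the
 * event's own count plus the counts already folded in from exited
 * children (via perf_event_count()'s child_count term) plus the live
 * counts of children still on the child_list, so callers see one
 * coherent total across inheritance.
 */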
static int perf_event_read_group(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}
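
/*
 * For reference, the buffer filled in above mirrors the PERF_FORMAT_GROUP
 * read layout documented in include/linux/perf_event.h (fields present
 * only when the matching read_format bit is set):
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; }		&& PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 */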
static int perf_event_read_one(struct perf_event *event,
			       u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}

/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;
	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct ring_buffer *rb;
	unsigned int events = POLL_HUP;

	/*
	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
	 * grabs the rb reference but perf_event_set_output() overrides it.
	 * Here is the timeline for two threads T1, T2:
	 * t0: T1, rb = rcu_dereference(event->rb)
	 * t1: T2, old_rb = event->rb
	 * t2: T2, event->rb = new rb
	 * t3: T2, ring_buffer_detach(old_rb)
	 * t4: T1, ring_buffer_attach(rb1)
	 * t5: T1, poll_wait(event->waitq)
	 *
	 * To avoid this problem, we grab mmap_mutex in perf_poll()
	 * thereby ensuring that the assignment of the new ring buffer
	 * and the detachment of the old buffer appear atomic to perf_poll()
	 */
	mutex_lock(&event->mmap_mutex);

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		ring_buffer_attach(event, rb);
		events = atomic_xchg(&rb->poll, 0);
	}
	rcu_read_unlock();

	mutex_unlock(&event->mmap_mutex);

	poll_wait(file, &event->waitq, wait);

	return events;
}

static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}

/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
				      void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

static void perf_event_for_each(struct perf_event *event,
				void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;
	/*
	 * The sibling loop below must apply func to each sibling, not to
	 * the leader again (the leader is already covered by the call
	 * just above); the original code passed 'event' here by mistake.
	 */
	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}

static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!is_sampling_event(event))
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}

static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
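
/*
 * Userspace sketch (illustrative only): disabling every event in a group
 * through the group leader's fd, then re-enabling just the leader:
 *
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */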
int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif
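
/*
 * Index to advertise in the mmap control page: 0 means the event is not
 * currently on the PMU (user-space must fall back to read()); any other
 * value is the hardware counter index biased by the offset above.
 */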
static int perf_event_index(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}

static void calc_timer_values(struct perf_event *event,
			      u64 *enabled,
			      u64 *running)
{
	u64 now, ctx_time;

	now = perf_clock();
	ctx_time = event->shadow_ctx_time + now;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct ring_buffer *rb;
	u64 enabled, running;

	rcu_read_lock();
	/*
	 * Compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * We cannot simply call update_context_time()
	 * because of locking issues: we can be called in
	 * NMI context.
	 */
	calc_timer_values(event, &enabled, &running);
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	userpg = rb->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
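
/*
 * The ->lock increments above pair with a seqcount-style retry loop on
 * the user-space side, along the lines of the sketch documented with
 * struct perf_event_mmap_page in include/linux/perf_event.h
 * (illustrative only):
 *
 *	u32 seq;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count = pc->offset;
 *		if (pc->index)
 *			count += rdpmc(pc->index - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */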
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct ring_buffer *rb;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (!list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	if (!list_empty(&event->rb_entry))
		goto unlock;

	list_add(&event->rb_entry, &rb->event_list);
unlock:
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_detach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	list_del_init(&event->rb_entry);
	wake_up_all(&event->waitq);
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_wakeup(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
		wake_up_all(&event->waitq);

unlock:
	rcu_read_unlock();
}

static void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;
	}
	rcu_read_unlock();

	return rb;
}

static void ring_buffer_put(struct ring_buffer *rb)
{
	struct perf_event *event, *n;
	unsigned long flags;

	if (!atomic_dec_and_test(&rb->refcount))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
		list_del_init(&event->rb_entry);
		wake_up_all(&event->waitq);
	}
	spin_unlock_irqrestore(&rb->event_lock, flags);

	call_rcu(&rb->rcu_head, rb_free_rcu);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->rb);
		struct user_struct *user = event->mmap_user;
		struct ring_buffer *rb = event->rb;

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->pinned_vm -= event->mmap_locked;
		rcu_assign_pointer(event->rb, NULL);
		ring_buffer_detach(event, rb);
		mutex_unlock(&event->mmap_mutex);

		ring_buffer_put(rb);
		free_uid(user);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct ring_buffer *rb;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same rb.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have rb pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->rb) {
		if (event->rb->nr_pages == nr_pages)
			atomic_inc(&event->rb->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->rb);

	if (vma->vm_flags & VM_WRITE)
		flags |= RING_BUFFER_WRITABLE;

	rb = rb_alloc(nr_pages,
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);

	if (!rb) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->rb, rb);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->pinned_vm += event->mmap_locked;

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
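
/*
 * Userspace sketch (illustrative only): the mapping must cover the
 * control page plus a power-of-two number of data pages, e.g. for
 * eight data pages:
 *
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */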
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */
void perf_event_wakeup(struct perf_event *event)
{
	ring_buffer_wakeup(event);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
				   struct perf_event *event,
				   u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0;
	u64 read_format = event->attr.read_format;
	/*
	 * Compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * We cannot simply call update_context_time()
	 * because of locking issues: we are called in
	 * NMI context.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}
static void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				   struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			    struct perf_event_context *task_ctx,
			    int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				   struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				 struct perf_mmap_event *mmap_event,
				 int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				struct perf_mmap_event *mmap_event,
				int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}
/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event,
				 int throttle, struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	u64 seq;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period, true);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}

int perf_event_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period value
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */
static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
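
/*
 * Worked example: sample_period == 4 and period_left == -4. Adding 3
 * events leaves -1 (still negative, no overflow). Adding 2 more gives
 * +1, so we arrive here with val == 1: nr = (4 + 1) / 4 = 1 overflow,
 * and period_left is rewound to 1 - 4 = -3, carrying the surplus event
 * into the next period.
 */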
  3832. static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
  3833. struct perf_sample_data *data,
  3834. struct pt_regs *regs)
  3835. {
  3836. struct hw_perf_event *hwc = &event->hw;
  3837. int throttle = 0;
  3838. if (!overflow)
  3839. overflow = perf_swevent_set_period(event);
  3840. if (hwc->interrupts == MAX_INTERRUPTS)
  3841. return;
  3842. for (; overflow; overflow--) {
  3843. if (__perf_event_overflow(event, throttle,
  3844. data, regs)) {
  3845. /*
  3846. * We inhibit the overflow from happening when
  3847. * hwc->interrupts == MAX_INTERRUPTS.
  3848. */
  3849. break;
  3850. }
  3851. throttle = 1;
  3852. }
  3853. }
  3854. static void perf_swevent_event(struct perf_event *event, u64 nr,
  3855. struct perf_sample_data *data,
  3856. struct pt_regs *regs)
  3857. {
  3858. struct hw_perf_event *hwc = &event->hw;
  3859. local64_add(nr, &event->count);
  3860. if (!regs)
  3861. return;
  3862. if (!is_sampling_event(event))
  3863. return;
  3864. if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
  3865. data->period = nr;
  3866. return perf_swevent_overflow(event, 1, data, regs);
  3867. } else
  3868. data->period = event->hw.last_period;
  3869. if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
  3870. return perf_swevent_overflow(event, 1, data, regs);
  3871. if (local64_add_negative(nr, &hwc->period_left))
  3872. return;
  3873. perf_swevent_overflow(event, 0, data, regs);
  3874. }
  3875. static int perf_exclude_event(struct perf_event *event,
  3876. struct pt_regs *regs)
  3877. {
  3878. if (event->hw.state & PERF_HES_STOPPED)
  3879. return 1;
  3880. if (regs) {
  3881. if (event->attr.exclude_user && user_mode(regs))
  3882. return 1;
  3883. if (event->attr.exclude_kernel && !user_mode(regs))
  3884. return 1;
  3885. }
  3886. return 0;
  3887. }
  3888. static int perf_swevent_match(struct perf_event *event,
  3889. enum perf_type_id type,
  3890. u32 event_id,
  3891. struct perf_sample_data *data,
  3892. struct pt_regs *regs)
  3893. {
  3894. if (event->attr.type != type)
  3895. return 0;
  3896. if (event->attr.config != event_id)
  3897. return 0;
  3898. if (perf_exclude_event(event, regs))
  3899. return 0;
  3900. return 1;
  3901. }
  3902. static inline u64 swevent_hash(u64 type, u32 event_id)
  3903. {
  3904. u64 val = event_id | (type << 32);
  3905. return hash_64(val, SWEVENT_HLIST_BITS);
  3906. }
  3907. static inline struct hlist_head *
  3908. __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
  3909. {
  3910. u64 hash = swevent_hash(type, event_id);
  3911. return &hlist->heads[hash];
  3912. }
  3913. /* For the read side: events when they trigger */
  3914. static inline struct hlist_head *
  3915. find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
  3916. {
  3917. struct swevent_hlist *hlist;
  3918. hlist = rcu_dereference(swhash->swevent_hlist);
  3919. if (!hlist)
  3920. return NULL;
  3921. return __find_swevent_head(hlist, type, event_id);
  3922. }
  3923. /* For the event head insertion and removal in the hlist */
  3924. static inline struct hlist_head *
  3925. find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
  3926. {
  3927. struct swevent_hlist *hlist;
  3928. u32 event_id = event->attr.config;
  3929. u64 type = event->attr.type;
  3930. /*
  3931. * Event scheduling is always serialized against hlist allocation
  3932. * and release. Which makes the protected version suitable here.
  3933. * The context lock guarantees that.
  3934. */
  3935. hlist = rcu_dereference_protected(swhash->swevent_hlist,
  3936. lockdep_is_held(&event->ctx->lock));
  3937. if (!hlist)
  3938. return NULL;
  3939. return __find_swevent_head(hlist, type, event_id);
  3940. }
  3941. static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  3942. u64 nr,
  3943. struct perf_sample_data *data,
  3944. struct pt_regs *regs)
  3945. {
  3946. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3947. struct perf_event *event;
  3948. struct hlist_node *node;
  3949. struct hlist_head *head;
  3950. rcu_read_lock();
  3951. head = find_swevent_head_rcu(swhash, type, event_id);
  3952. if (!head)
  3953. goto end;
  3954. hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
  3955. if (perf_swevent_match(event, type, event_id, data, regs))
  3956. perf_swevent_event(event, nr, data, regs);
  3957. }
  3958. end:
  3959. rcu_read_unlock();
  3960. }
  3961. int perf_swevent_get_recursion_context(void)
  3962. {
  3963. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3964. return get_recursion_context(swhash->recursion);
  3965. }
  3966. EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  3967. inline void perf_swevent_put_recursion_context(int rctx)
  3968. {
  3969. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3970. put_recursion_context(swhash->recursion, rctx);
  3971. }
  3972. void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
  3973. {
  3974. struct perf_sample_data data;
  3975. int rctx;
  3976. preempt_disable_notrace();
  3977. rctx = perf_swevent_get_recursion_context();
  3978. if (rctx < 0)
  3979. return;
  3980. perf_sample_data_init(&data, addr);
  3981. do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
  3982. perf_swevent_put_recursion_context(rctx);
  3983. preempt_enable_notrace();
  3984. }
  3985. static void perf_swevent_read(struct perf_event *event)
  3986. {
  3987. }
  3988. static int perf_swevent_add(struct perf_event *event, int flags)
  3989. {
  3990. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  3991. struct hw_perf_event *hwc = &event->hw;
  3992. struct hlist_head *head;
  3993. if (is_sampling_event(event)) {
  3994. hwc->last_period = hwc->sample_period;
  3995. perf_swevent_set_period(event);
  3996. }
  3997. hwc->state = !(flags & PERF_EF_START);
  3998. head = find_swevent_head(swhash, event);
  3999. if (WARN_ON_ONCE(!head))
  4000. return -EINVAL;
  4001. hlist_add_head_rcu(&event->hlist_entry, head);
  4002. return 0;
  4003. }
  4004. static void perf_swevent_del(struct perf_event *event, int flags)
  4005. {
  4006. hlist_del_rcu(&event->hlist_entry);
  4007. }
  4008. static void perf_swevent_start(struct perf_event *event, int flags)
  4009. {
  4010. event->hw.state = 0;
  4011. }
  4012. static void perf_swevent_stop(struct perf_event *event, int flags)
  4013. {
  4014. event->hw.state = PERF_HES_STOPPED;
  4015. }
  4016. /* Deref the hlist from the update side */
  4017. static inline struct swevent_hlist *
  4018. swevent_hlist_deref(struct swevent_htable *swhash)
  4019. {
  4020. return rcu_dereference_protected(swhash->swevent_hlist,
  4021. lockdep_is_held(&swhash->hlist_mutex));
  4022. }
  4023. static void swevent_hlist_release(struct swevent_htable *swhash)
  4024. {
  4025. struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
  4026. if (!hlist)
  4027. return;
  4028. rcu_assign_pointer(swhash->swevent_hlist, NULL);
  4029. kfree_rcu(hlist, rcu_head);
  4030. }
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}
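/*
 * cpu-clock and task-clock are software events by ABI, but they are
 * implemented by the hrtimer based perf_cpu_clock/perf_task_clock
 * pmus further down, so this pmu answers -ENOENT for them and lets
 * perf_init_event() keep looking.  For the event types we do accept,
 * the jump label above keeps the static branch at the in-kernel
 * perf_sw_event() call sites patched in only while at least one
 * (parent) event of that type exists.
 */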
static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}
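/*
 * Entry point from the tracepoint glue in kernel/trace: by the time
 * we get here the trace event code has already built the raw record,
 * taken the recursion context (rctx) and looked up the per-cpu hlist
 * of perf events attached to this tracepoint.
 */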
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && is_idle_task(current)))
			if (perf_event_overflow(event, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}
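/*
 * On stop, the time left on the hrtimer is stashed in period_left so
 * that a later start resumes the current period instead of starting
 * a fresh one; a remainder that already went negative (the timer had
 * expired) is replaced with the same 10us floor the callback above
 * enforces on the period.
 */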
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff,
	 * e.g. sample_freq = 1000 becomes a fixed 1000000ns period.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};

/*
 * Software event: task time clock
 */
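/*
 * Unlike the cpu clock above, which counts wall time off
 * local_clock(), the task clock counts ctx->time, which only
 * advances while the monitored task is scheduled in, so the deltas
 * below measure task runtime rather than wall time.
 */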
static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
};

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}
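/*
 * Default transaction helpers for pmus that supply pmu_enable()/
 * pmu_disable() but no start_txn(): bracketing the group add with a
 * disable/enable pair batches the hardware writes into one go.
 */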
static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * A poor man's refcount: only free the percpu context once no
	 * remaining pmu still shares it.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}
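/*
 * pmu_idr maps the dynamic attr.type numbers (>= PERF_TYPE_MAX) that
 * perf_pmu_register() hands out back to their pmu; perf_init_event()
 * consults it before falling back to walking the pmus list, which is
 * how the fixed PERF_TYPE_* pmus are found.
 */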
static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
	__ATTR_RO(type),
	__ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
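/*
 * Presumably all per-cpu contexts share one lock class for their
 * mutex and one for their lock, distinct from the task context
 * classes, so that the nesting of a cpu context lock inside a task
 * context lock (or vice versa) does not look like a self-deadlock
 * to lockdep.
 */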
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->active_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}
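/*
 * Resolve attr.type to a pmu.  Dynamically registered types are
 * looked up in pmu_idr directly; everything else walks the pmus
 * list, where -ENOENT from event_init() means "not mine, try the
 * next one" and any other error aborts the search.
 */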
struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
	int ret;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
		goto unlock;
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}
/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	INIT_LIST_HEAD(&event->rb_entry);

	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event) {
		overflow_handler = parent_event->overflow_handler;
		context = parent_event->overflow_handler_context;
	}

	event->overflow_handler	= overflow_handler;
	event->overflow_handler_context = context;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_sched_events.key);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
	}

	return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy leaves the
	 * tail zeroed.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
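/*
 * Redirect an event's output into output_event's ring buffer; this
 * backs both PERF_FLAG_FD_OUTPUT at open time and the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl, roughly (user-space sketch):
 *
 *	fd2 = perf_event_open(&attr2, pid, cpu, -1, 0);
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 *
 * A NULL output_event detaches the event from whatever buffer it
 * currently feeds.
 */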
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL, *old_rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	old_rb = event->rb;
	rcu_assign_pointer(event->rb, rb);
	if (old_rb)
		ring_buffer_detach(event, old_rb);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);
	if (old_rb)
		ring_buffer_put(old_rb);
out:
	return ret;
}
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 * @flags:	perf event open flags (PERF_FLAG_*)
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		jump_label_inc(&perf_sched_events.key);
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback to trigger when we hit the event
 * @context: data passed to the overflow handler
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
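/*
 * A minimal in-kernel usage sketch (hypothetical caller: cpu 0, no
 * task, no overflow handler; error handling and the teardown via
 * perf_event_release_kernel() elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(struct perf_event_attr),
 *	};
 *	struct perf_event *ev;
 *	u64 enabled, running;
 *
 *	ev = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	if (!IS_ERR(ev))
 *		perf_event_read_value(ev, &enabled, &running);
 */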
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	if (child_event->parent) {
		raw_spin_lock_irq(&child_ctx->lock);
		perf_group_detach(child_event);
		raw_spin_unlock_irq(&child_ctx->lock);
	}

	perf_remove_from_context(child_event);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
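/*
 * *inherited_all stays set only while every event seen so far was
 * cloned into the child; perf_event_init_context() uses it to decide
 * whether the child context may be marked a clone of the parent's,
 * which is what later allows contexts to be swapped instead of
 * rebuilt on context switch.
 */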
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};
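/*
 * CPU_UP_PREPARE and CPU_DOWN_FAILED both (re)build the swevent hash
 * for the CPU that is coming (back) up, while CPU_DOWN_PREPARE and
 * CPU_UP_CANCELED tear it down again, keeping the two directions of
 * the hotplug path symmetric.
 */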
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
			     struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */