/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit2-3 : reserved
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 *	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0	/* not implemented at all */
#define PFM_REG_IMPL		0x1	/* register implemented */
#define PFM_REG_END		0x2	/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	 /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	 /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	 /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	 /* PMD used as buffer */
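
/*
 * Example (illustrative, not from the original source): PFM_REG_COUNTING
 * expands to (0x2<<4)|(0x1<<4)|0x1 = 0x31, i.e. a register that is
 * implemented, carries the monitor bits, and drives a PMD used as a
 * counter, which is why the *_IS_* tests below compare against the
 * full pattern rather than a single bit.
 */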
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	(i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	(i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	   IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	   IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask)	(ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)	(((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask)	(ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx, n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx, n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg == 1)

#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
/*
 * context protection macros
 * in SMP:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPUs. Interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
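
/*
 * Typical usage sketch (illustrative, not from the original source):
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	... update ctx state with overflow interrupts masked ...
 *	UNPROTECT_CTX(ctx, flags);
 */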
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)	(cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
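
/*
 * Note the double parentheses in callers: the macro argument is a
 * complete printk() argument list, e.g. (illustrative)
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 */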
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will be blocked on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */

/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */

	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* points to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int	num_pmds;	/* number of PMDS: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDS */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
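
/*
 * Illustrative probe sketch (not part of the original file): a PMU
 * description can either match on the cpuid family via .pmu_family,
 * or supply its own probe callback, e.g.
 *
 *	static int my_pmu_probe(void)
 *	{
 *		return local_cpu_data->family == 0x1f ? 0 : -1;
 *	}
 *
 * where my_pmu_probe and the family value 0x1f are made-up examples.
 */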
/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long	val;
	ibr_mask_reg_t	ibr;
	dbr_mask_reg_t	dbr;
} dbreg_t;
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1	/* cannot be zero */
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t	pfm_stats[NR_CPUS];
static pfm_session_t	pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;

static struct proc_dir_entry *perfmon_dir;

static pfm_uuid_t	pfm_null_uuid = {0,};

static spinlock_t	pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t	*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "perfmon",
		.mode		= 0755,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.ctl_name	= CTL_KERN,
		.procname	= "kernel",
		.mode		= 0755,
		.child		= pfm_sysctl_dir,
	},
	{}
};
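
/*
 * The nested tables above end up exposing the controls as
 * /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,expert_mode}.
 */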
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a, b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}
static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
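
/*
 * Note: the ctxsw variants above take a plain spin_lock because the
 * context-switch paths that call them already run with interrupts
 * disabled, so the irqsave/irqrestore dance is unnecessary there.
 */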
static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

static int
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
	     struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};
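
/*
 * pfmfs is a kernel-internal pseudo-filesystem (built on
 * get_sb_pseudo, in the same spirit as pipefs): it is never mounted
 * by users and only provides inodes for the anonymous files that back
 * perfmon contexts.
 */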
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs(struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};
static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;

	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0, 1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0, 0UL);
	ia64_srlz_d();
}
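
/*
 * PMC[0] bit 0 is the PMU freeze bit on IA-64: writing 1 freezes all
 * counters, writing 0 resumes them. The remaining bits of PMC[0]
 * carry overflow status, which is why PMC0_HAS_OVFL() masks bit 0
 * out before testing.
 */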
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i = 0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i = 0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
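
/*
 * Illustrative example of the 64-bit split: if the hardware counter
 * is 47 bits wide, ovfl_val is (1UL<<47)-1. The high bits of the
 * virtual 64-bit value live in ctx_pmds[i].val, the low bits live in
 * the hardware PMD, and pfm_read_soft_counter() adds the two halves
 * back together.
 */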
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}
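
/*
 * The message queue is a classic circular buffer: one slot is left
 * unused to tell "full" apart from "empty" (the next == head test
 * above), so at most PFM_MAX_MSGS-1 messages can be pending at once.
 */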
static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
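
/*
 * The pages backing the sampling buffer are marked reserved so the VM
 * never swaps them out and so they can safely be remapped into the
 * monitoring process' address space.
 */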
static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
  758. static void
  759. pfm_mask_monitoring(struct task_struct *task)
  760. {
  761. pfm_context_t *ctx = PFM_GET_CTX(task);
  762. unsigned long mask, val, ovfl_mask;
  763. int i;
  764. DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
  765. ovfl_mask = pmu_conf->ovfl_val;
  766. /*
  767. * monitoring can only be masked as a result of a valid
  768. * counter overflow. In UP, it means that the PMU still
  769. * has an owner. Note that the owner can be different
  770. * from the current task. However the PMU state belongs
  771. * to the owner.
  772. * In SMP, a valid overflow only happens when task is
  773. * current. Therefore if we come here, we know that
  774. * the PMU state belongs to the current task, therefore
  775. * we can access the live registers.
  776. *
  777. * So in both cases, the live register contains the owner's
  778. * state. We can ONLY touch the PMU registers and NOT the PSR.
  779. *
  780. * As a consequence to this call, the ctx->th_pmds[] array
  781. * contains stale information which must be ignored
  782. * when context is reloaded AND monitoring is active (see
  783. * pfm_restart).
  784. */
  785. mask = ctx->ctx_used_pmds[0];
  786. for (i = 0; mask; i++, mask>>=1) {
  787. /* skip non used pmds */
  788. if ((mask & 0x1) == 0) continue;
  789. val = ia64_get_pmd(i);
  790. if (PMD_IS_COUNTING(i)) {
  791. /*
  792. * we rebuild the full 64 bit value of the counter
  793. */
  794. ctx->ctx_pmds[i].val += (val & ovfl_mask);
  795. } else {
  796. ctx->ctx_pmds[i].val = val;
  797. }
  798. DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
  799. i,
  800. ctx->ctx_pmds[i].val,
  801. val & ovfl_mask));
  802. }
  803. /*
  804. * mask monitoring by setting the privilege level to 0
  805. * we cannot use psr.pp/psr.up for this, it is controlled by
  806. * the user
  807. *
  808. * if task is current, modify actual registers, otherwise modify
  809. * thread save state, i.e., what will be restored in pfm_load_regs()
  810. */
  811. mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
  812. for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
  813. if ((mask & 0x1) == 0UL) continue;
  814. ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
  815. ctx->th_pmcs[i] &= ~0xfUL;
  816. DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
  817. }
  818. /*
  819. * make all of this visible
  820. */
  821. ia64_srlz_d();
  822. }
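
/*
 * Editor's note: the ~0xfUL mask above clears pmc.plm, the 4-bit
 * privilege level mask in bits 0-3 of a monitor PMC. A monitor
 * programmed with, say, plm=0x9 (kernel level 0 + user level 3) becomes
 * plm=0, which matches no privilege level, so the counter stops without
 * touching psr.up/psr.pp, which remain under user control.
 */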
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip unused pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because they could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces:
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}
/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;

	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need to check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
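
/*
 * Editor's sketch (hypothetical module, not part of this file): a
 * minimal sampling format only needs a UUID, a name and the mandatory
 * handler before calling pfm_register_buffer_fmt():
 *
 *	static pfm_buffer_fmt_t demo_fmt = {
 *		.fmt_name    = "demo-format",
 *		.fmt_uuid    = { 0xde, ... },	// 16-byte pfm_uuid_t
 *		.fmt_handler = demo_handler,	// required, see checks above
 *	};
 *	pfm_register_buffer_fmt(&demo_fmt);	// -EBUSY on duplicate UUID
 *
 * All other callbacks (validate/getsize/init/exit/restart) are
 * optional: the pfm_buf_fmt_*() wrappers above treat a NULL callback
 * as a no-op returning 0.
 */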
extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;
		pfm_sessions.pfs_sys_sessions++;
	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}
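
/*
 * Editor's note on the reservation rules enforced above: per-task and
 * system-wide sessions are strictly mutually exclusive (any task session
 * blocks all system-wide sessions and vice versa), and at most one
 * system-wide session may own a given CPU (pfs_sys_session[cpu]).
 * PAL_HALT is disabled while any session exists because halting the
 * CPU in default_idle() could interfere with active monitoring.
 */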
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}
/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
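
/*
 * Editor's sketch of the expected user-level consumer (hypothetical,
 * not part of this file): a monitoring tool blocks in read() on the
 * context fd and receives one fixed-size message per notification:
 *
 *	pfm_msg_t msg;
 *	ssize_t n = read(ctx_fd, &msg, sizeof(msg));
 *	if (n == sizeof(msg) && msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL)
 *		;	// handle the counter overflow notification
 *
 * Reads shorter than sizeof(pfm_msg_t) fail with -EINVAL, as enforced
 * above; O_NONBLOCK readers get -EAGAIN when the queue is empty.
 */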
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}
/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this
	 * may go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in the caller. Serialization of this function is ensured by the caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially frees resources.
 * When the caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside the critical section
	 * because some VM functions reenable interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wake up to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such a case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. Should there be a subsequent PMU overflow originally
	 * meant for sampling, it will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * UNLOADED means that the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnecting the file descriptor from the context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the caller's side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static const struct file_operations pfm_file_ops = {
	.llseek  = no_llseek,
	.read    = pfm_read,
	.write   = pfm_write,
	.poll    = pfm_poll,
	.ioctl   = pfm_ioctl,
	.open    = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync  = pfm_fasync,
	.release = pfm_close,
	.flush   = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};
static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_path.dentry) goto out;

	file->f_path.dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_path.dentry, inode);
	file->f_path.mnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);

	if (file)
		put_filp(file);
	put_unused_fd(fd);
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
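
/*
 * Editor's note: the loop above remaps one page at a time because the
 * buffer comes from vmalloc() and is only virtually contiguous; each
 * page's physical frame has to be looked up individually (ia64_tpa()
 * translates the kernel virtual address to a physical one) before it
 * is inserted read-only into the user VMA.
 */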
/*
 * allocates a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size, aligned to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 *	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc() clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	  = mm;
	vma->vm_flags	  = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer into it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
			vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	/* valid signal */

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}
static int
pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task->pid));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switch restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking, especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitely slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
/*
 * cannot attach if :
 *	- kernel task
 *	- task not owned by caller
 *	- task incompatible with context mode
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task->pid));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task);

	/* more to come... */

	return 0;
}
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	struct file *filp;
	int ctx_flags;
	int ret;

	/* let's check the arguments first */
	ret = pfarg_is_sane(current, req);
	if (ret < 0) return ret;

	ctx_flags = req->ctx_flags;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	ret = pfm_alloc_fd(&filp);
	if (ret < 0) goto error_file;

	req->ctx_fd = ctx->ctx_fd = ret;

	/*
	 * attach context to file
	 */
	filp->private_data = ctx;

	/*
	 * does the user want to sample?
	 */
	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
		ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
		if (ret) goto buffer_error;
	}

	/*
	 * init context protection lock
	 */
	spin_lock_init(&ctx->ctx_lock);

	/*
	 * context is unloaded
	 */
	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * initialization of context's flags
	 */
	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1 : 0;
	/*
	 * will move to set properties
	 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1 : 0;
	 */

	/*
	 * init restart semaphore to locked
	 */
	init_completion(&ctx->ctx_restart_done);

	/*
	 * activation is used in SMP only
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * initialize notification message queue
	 */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	init_waitqueue_head(&ctx->ctx_msgq_wait);
	init_waitqueue_head(&ctx->ctx_zombieq);

	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
		ctx,
		ctx_flags,
		ctx->ctx_fl_system,
		ctx->ctx_fl_block,
		ctx->ctx_fl_excl_idle,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	/*
	 * initialize soft PMU state
	 */
	pfm_reset_pmu_state(ctx);

	return 0;

buffer_error:
	pfm_free_fd(ctx->ctx_fd, filp);

	if (ctx->ctx_buf_fmt) {
		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
	}
error_file:
	pfm_context_free(ctx);

error:
	return ret;
}
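
/*
 * Editor's sketch (hypothetical caller, assuming the standard ia64
 * perfmonctl() entry point): context creation is reached through the
 * system call and the kernel hands back the new descriptor in ctx_fd:
 *
 *	pfarg_context_t req;
 *	memset(&req, 0, sizeof(req));	// null UUID => no sampling buffer
 *	if (perfmonctl(0, PFM_CREATE_CONTEXT, &req, 1) == 0)
 *		;	// req.ctx_fd is the perfmon file descriptor
 */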
static inline unsigned long
pfm_new_counter_value(pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}
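
/*
 * Editor's note, with a worked example: counters are armed with
 * negative values and overflow at zero, so with short_reset = -100000
 * and (old_seed & mask) = 37 the counter is reloaded with -100037,
 * i.e. the next sampling period is 100037 events. Subtracting a
 * pseudo-random offset this way jitters the inter-sample distance to
 * avoid phase-locked sampling artifacts.
 */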
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others        |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for (i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
}

static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

	if (ctx->ctx_state == PFM_CTX_MASKED) {
		pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
		return;
	}

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		val           = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));

		pfm_write_soft_counter(ctx, i, val);
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for (i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
	ia64_srlz_d();
}
  2399. static int
  2400. pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  2401. {
  2402. struct task_struct *task;
  2403. pfarg_reg_t *req = (pfarg_reg_t *)arg;
  2404. unsigned long value, pmc_pm;
  2405. unsigned long smpl_pmds, reset_pmds, impl_pmds;
  2406. unsigned int cnum, reg_flags, flags, pmc_type;
  2407. int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
  2408. int is_monitor, is_counting, state;
  2409. int ret = -EINVAL;
  2410. pfm_reg_check_t wr_func;
  2411. #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
  2412. state = ctx->ctx_state;
  2413. is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
  2414. is_system = ctx->ctx_fl_system;
  2415. task = ctx->ctx_task;
  2416. impl_pmds = pmu_conf->impl_pmds[0];
  2417. if (state == PFM_CTX_ZOMBIE) return -EINVAL;
  2418. if (is_loaded) {
  2419. /*
  2420. * In system wide and when the context is loaded, access can only happen
  2421. * when the caller is running on the CPU being monitored by the session.
  2422. * It does not have to be the owner (ctx_task) of the context per se.
  2423. */
  2424. if (is_system && ctx->ctx_cpu != smp_processor_id()) {
  2425. DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
  2426. return -EBUSY;
  2427. }
  2428. can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
  2429. }
  2430. expert_mode = pfm_sysctl.expert_mode;
  2431. for (i = 0; i < count; i++, req++) {
  2432. cnum = req->reg_num;
  2433. reg_flags = req->reg_flags;
  2434. value = req->reg_value;
  2435. smpl_pmds = req->reg_smpl_pmds[0];
  2436. reset_pmds = req->reg_reset_pmds[0];
  2437. flags = 0;
  2438. if (cnum >= PMU_MAX_PMCS) {
  2439. DPRINT(("pmc%u is invalid\n", cnum));
  2440. goto error;
  2441. }
  2442. pmc_type = pmu_conf->pmc_desc[cnum].type;
  2443. pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
  2444. is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
  2445. is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
  2446. /*
  2447. * we reject all non implemented PMC as well
  2448. * as attempts to modify PMC[0-3] which are used
  2449. * as status registers by the PMU
  2450. */
  2451. if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
  2452. DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
  2453. goto error;
  2454. }
  2455. wr_func = pmu_conf->pmc_desc[cnum].write_check;
  2456. /*
  2457. * If the PMC is a monitor, then if the value is not the default:
  2458. * - system-wide session: PMCx.pm=1 (privileged monitor)
  2459. * - per-task : PMCx.pm=0 (user monitor)
  2460. */
  2461. if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
  2462. DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
  2463. cnum,
  2464. pmc_pm,
  2465. is_system));
  2466. goto error;
  2467. }
  2468. if (is_counting) {
  2469. /*
  2470. * enforce generation of overflow interrupt. Necessary on all
  2471. * CPUs.
  2472. */
  2473. value |= 1 << PMU_PMC_OI;
  2474. if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
  2475. flags |= PFM_REGFL_OVFL_NOTIFY;
  2476. }
  2477. if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
  2478. /* verify validity of smpl_pmds */
  2479. if ((smpl_pmds & impl_pmds) != smpl_pmds) {
  2480. DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
  2481. goto error;
  2482. }
  2483. /* verify validity of reset_pmds */
  2484. if ((reset_pmds & impl_pmds) != reset_pmds) {
  2485. DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
  2486. goto error;
  2487. }
  2488. } else {
  2489. if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
  2490. DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
  2491. goto error;
  2492. }
  2493. /* eventid on non-counting monitors are ignored */
  2494. }
  2495. /*
  2496. * execute write checker, if any
  2497. */
  2498. if (likely(expert_mode == 0 && wr_func)) {
  2499. ret = (*wr_func)(task, ctx, cnum, &value, regs);
  2500. if (ret) goto error;
  2501. ret = -EINVAL;
  2502. }
  2503. /*
  2504. * no error on this register
  2505. */
  2506. PFM_REG_RETFLAG_SET(req->reg_flags, 0);
  2507. /*
  2508. * Now we commit the changes to the software state
  2509. */
  2510. /*
  2511. * update overflow information
  2512. */
  2513. if (is_counting) {
  2514. /*
  2515. * full flag update each time a register is programmed
  2516. */
  2517. ctx->ctx_pmds[cnum].flags = flags;
  2518. ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
  2519. ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
  2520. ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
  2521. /*
  2522. * Mark all PMDS to be accessed as used.
  2523. *
  2524. * We do not keep track of PMC because we have to
  2525. * systematically restore ALL of them.
  2526. *
  2527. * We do not update the used_monitors mask, because
  2528. * if we have not programmed them, then will be in
  2529. * a quiescent state, therefore we will not need to
  2530. * mask/restore then when context is MASKED.
  2531. */
  2532. CTX_USED_PMD(ctx, reset_pmds);
  2533. CTX_USED_PMD(ctx, smpl_pmds);
  2534. /*
  2535. * make sure we do not try to reset on
  2536. * restart because we have established new values
  2537. */
  2538. if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
  2539. }
  2540. /*
  2541. * Needed in case the user does not initialize the equivalent
  2542. * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
  2543. * possible leak here.
  2544. */
  2545. CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
  2546. /*
  2547. * keep track of the monitor PMC that we are using.
  2548. * we save the value of the pmc in ctx_pmcs[] and if
  2549. * the monitoring is not stopped for the context we also
  2550. * place it in the saved state area so that it will be
  2551. * picked up later by the context switch code.
  2552. *
  2553. * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
  2554. *
  2555. * The value in th_pmcs[] may be modified on overflow, i.e., when
  2556. * monitoring needs to be stopped.
  2557. */
  2558. if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
  2559. /*
  2560. * update context state
  2561. */
  2562. ctx->ctx_pmcs[cnum] = value;
  2563. if (is_loaded) {
  2564. /*
  2565. * write thread state
  2566. */
  2567. if (is_system == 0) ctx->th_pmcs[cnum] = value;
  2568. /*
  2569. * write hardware register if we can
  2570. */
  2571. if (can_access_pmu) {
  2572. ia64_set_pmc(cnum, value);
  2573. }
  2574. #ifdef CONFIG_SMP
  2575. else {
  2576. /*
  2577. * per-task SMP only here
  2578. *
  2579. * we are guaranteed that the task is not running on the other CPU,
  2580. * we indicate that this PMD will need to be reloaded if the task
  2581. * is rescheduled on the CPU it ran last on.
  2582. */
  2583. ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
  2584. }
  2585. #endif
  2586. }
  2587. DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
  2588. cnum,
  2589. value,
  2590. is_loaded,
  2591. can_access_pmu,
  2592. flags,
  2593. ctx->ctx_all_pmcs[0],
  2594. ctx->ctx_used_pmds[0],
  2595. ctx->ctx_pmds[cnum].eventid,
  2596. smpl_pmds,
  2597. reset_pmds,
  2598. ctx->ctx_reload_pmcs[0],
  2599. ctx->ctx_used_monitors[0],
  2600. ctx->ctx_ovfl_regs[0]));
  2601. }
  2602. /*
  2603. * make sure the changes are visible
  2604. */
  2605. if (can_access_pmu) ia64_srlz_d();
  2606. return 0;
  2607. error:
  2608. PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
  2609. return ret;
  2610. }
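
/*
 * Illustrative user-level sketch (the register number and the event
 * selection bits are invented; the perfmonctl() calling convention is
 * the one implemented by the command table further below):
 *
 *	pfarg_reg_t pc;
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;		// first counting monitor PMC
 *	pc.reg_value = ...;		// event selection bits
 *	if (perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1) == -1)
 *		err(1, "PFM_WRITE_PMCS");
 *
 * On error, PFM_REG_RETFL_EINVAL is set in pc.reg_flags and copied back,
 * so the caller can tell which request of a vector failed.
 */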
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;

			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;

			value = v;
			ret   = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64bits) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when the context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value &  ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
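		/*
		 * Example (illustrative; a 47-bit hardware counter is assumed
		 * here): ovfl_mask = (1UL << 47) - 1. Priming a counter with
		 * the 64-bit value -200000 keeps the sign-extension bits
		 * (value & ~ovfl_mask) in the software state, while only the
		 * low 47 bits (value & ovfl_mask) go to the hardware PMD.
		 * pfm_read_pmds() reverses the split, so users see a full
		 * 64-bit counter on top of a narrower hardware register.
		 */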
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) ctx->th_pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			  cnum,
			  value,
			  is_loaded,
			  can_access_pmu,
			  hw_value,
			  ctx->ctx_pmds[cnum].val,
			  ctx->ctx_pmds[cnum].short_reset,
			  ctx->ctx_pmds[cnum].long_reset,
			  PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			  ctx->ctx_pmds[cnum].seed,
			  ctx->ctx_pmds[cnum].mask,
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_pmds[cnum].reset_pmds[0],
			  ctx->ctx_reload_pmds[0],
			  ctx->ctx_all_pmds[0],
			  ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
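
/*
 * Illustrative user-level sketch (the sampling period of 100000 events
 * is invented). A counting PMD is primed with the negative of the period
 * so that it overflows after that many events:
 *
 *	pfarg_reg_t pd;
 *	memset(&pd, 0, sizeof(pd));
 *	pd.reg_num         = 4;
 *	pd.reg_value       = -100000UL;	// initial 64-bit virtual value
 *	pd.reg_long_reset  = -100000UL;	// value after user-requested restart
 *	pd.reg_short_reset = -100000UL;	// value after in-kernel reset
 *	if (perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1) == -1)
 *		err(1, "PFM_WRITE_PMDS");
 */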
/*
 * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
 * guaranteed to return consistent data to the user; it may simply be old. It is not
 * trivial to treat the overflow while inside the call because you may end up in
 * some module sampling buffer code causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true even when not self-monitoring, but only in UP
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */
	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval        = ctx->ctx_pmds[cnum].val;
		lval        = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live register due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu){
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}
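		/*
		 * Example (illustrative): for a counter primed as sketched in
		 * pfm_write_pmds() above, the value returned to the user is
		 * (hardware bits & ovfl_mask) + sval, where sval holds the
		 * software-maintained upper bits; the sum reconstructs the
		 * 64-bit virtual counter.
		 */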
		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);

int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
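
/*
 * Illustrative in-kernel sketch (my_req, its register number, and the
 * record_sample() helper are invented): a sampling-format module's
 * overflow handler could read the current value of a PMD it depends on
 * via the exported entry point above:
 *
 *	pfarg_reg_t my_req = { .reg_num = 5 };
 *	int err = pfm_mod_read_pmds(current, &my_req, 1, regs);
 *	if (err == 0)
 *		record_sample(my_req.reg_value);	// hypothetical helper
 */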
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task->pid, ret));

	UNLOCK_PFS(flags);

	return ret;
}

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}

static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
		case PFM_CTX_MASKED:
			break;
		case PFM_CTX_LOADED:
			if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
			/* fall through */
		case PFM_CTX_UNLOADED:
		case PFM_CTX_ZOMBIE:
			DPRINT(("invalid state=%d\n", state));
			return -EBUSY;
		default:
			DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
			return -EINVAL;
	}

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task->pid,
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task->pid));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));

				// cannot use pfm_stop_monitoring(task, regs);
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restart before this one is
		 * seen by other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d]\n", task->pid));
		complete(&ctx->ctx_restart_done);
	} else {
		DPRINT(("[%d] armed exit trap\n", task->pid));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		pfm_set_task_notify(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
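
/*
 * Illustrative user-level sketch: a monitoring tool that received an
 * overflow notification message typically processes the sampling buffer
 * and then re-arms the session with:
 *
 *	if (perfmonctl(fd, PFM_RESTART, NULL, 0) == -1)
 *		err(1, "PFM_RESTART");
 *
 * For a blocked task this posts ctx_restart_done; otherwise the reset is
 * carried out by the monitored task itself via pfm_handle_work().
 */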
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}

/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the debug registers when
	 * the task is the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */

	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourself as user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));

			goto abort_mission;
		}

		/*
		 * make sure we do not install enabled breakpoints
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMC, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers is centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}
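
/*
 * Note (background, grounded in the loop above): the IA-64 breakpoint
 * registers work in even/odd pairs -- the even register holds an address,
 * the odd one the mask and enable bits. That is why only odd registers
 * (rnum & 0x1) have their enable bits (ibr.x, or dbr.r/dbr.w) forcibly
 * cleared before being installed: perfmon uses them only as range
 * restrictions for monitoring, never as real breakpoints.
 */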
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}

int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);

int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);

static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}

static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		PFM_CTX_TASK(ctx)->pid,
		state,
		is_system));
	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task mode
	 */

	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task->pid));
	}
	return 0;
}

static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-process mode
	 */

	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = task_pt_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
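
/*
 * Summary of the stop/start pair above: system-wide sessions are gated
 * by psr.pp together with dcr.pp (so privileged monitoring also covers
 * execution following interruptions), while per-task sessions are gated
 * by psr.up only. pfm_stop() reverses exactly the bits that pfm_start()
 * set, which is why pfm_context_unload() below can rely on it to clear
 * the psr and dcr state.
 */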
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			break;
		}
	} while_each_thread (g, t);

	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}

static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	struct pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	ret = -EBUSY;
	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = ctx->th_pmcs;
	pmds_source = ctx->th_pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task->pid));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMD from ctx to PMU (as opposed to thread state)
		 * restore all PMC from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = task_pt_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task, there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task  = NULL;
			}
		}
	}
	return ret;
}
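
/*
 * Illustrative user-level sketch: after creating a context and
 * programming the registers, a self-monitoring task attaches the
 * context to itself and then starts monitoring:
 *
 *	pfarg_load_t load;
 *	memset(&load, 0, sizeof(load));
 *	load.load_pid = getpid();
 *	if (perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1) == -1)
 *		err(1, "PFM_LOAD_CONTEXT");
 *	if (perfmonctl(fd, PFM_START, NULL, 0) == -1)
 *		err(1, "PFM_START");
 */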
/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread()
 * which also grabs the context lock and would therefore be blocked
 * until we are here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);

static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : task_pt_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task->pid));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task->pid));

	return 0;
}

/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = task_pt_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));

	state = ctx->ctx_state;
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * only comes to this function if pfm_context is not NULL, i.e., cannot
			 * be in unloaded state
			 */
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
			break;
		case PFM_CTX_LOADED:
		case PFM_CTX_MASKED:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			DPRINT(("ctx unloaded for current state was %d\n", state));

			pfm_end_notify_user(ctx);
			break;
		case PFM_CTX_ZOMBIE:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			free_ok = 1;
			break;
		default:
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
			break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}

/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
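
/*
 * Note: each PFM_CMD() entry expands to a pfm_cmd_desc_t. For example,
 * the slot for command 1 above is equivalent to
 *
 *	{ pfm_write_pmcs, "pfm_write_pmcs",
 *	  PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP,
 *	  PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t), NULL },
 *
 * i.e. a handler that needs a valid fd, copies its argument vector both
 * ways, and may require the monitored task to be stopped.
 */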
  3944. #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task->pid,
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
			DPRINT(("[%d] task not in stopped state\n", task->pid));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);

#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for commands without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}
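
	/*
	 * Example of the two-pass copy above: for the context-create command
	 * (index 8 in pfm_cmd_tab), the first pass copies one pfarg_context_t,
	 * then pfm_ctx_getsize() inspects it and may report extra bytes needed
	 * for sampling-format specific arguments. If so, we jump back to
	 * restart_args and recopy base_sz*count + xtra_sz bytes in one shot.
	 */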
	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = (pfm_context_t *)file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, task_pt_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (file)
		fput(file);

	kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
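
/*
 * Illustrative user-level call sequence (a sketch for orientation only,
 * not kernel code; the PFM_* command names and the perfmonctl() wrapper
 * are assumed to be the ones declared in perfmon.h):
 *
 *	pfarg_context_t c; pfarg_load_t l;
 *	memset(&c, 0, sizeof(c));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	// fd returned in c.ctx_fd
 *	... program counters via PFM_WRITE_PMCS / PFM_WRITE_PMDS ...
 *	memset(&l, 0, sizeof(l)); l.load_pid = getpid();
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &l, 1);
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);
 */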
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", current->pid));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The interruptible wait on
 * ctx_restart_done may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (pUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = task_pt_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because the interruptible wait below may sleep
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we get to the wait
	 */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered interrupts mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values, in which case
	 * ovfl_regs is reset to match the new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case the wait was interrupted, we do not restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
		     pmc0,
		     task ? task->pid: -1,
		     (regs ? regs->cr_iip : 0),
		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		     ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}
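
	/*
	 * Worked example of the arithmetic above, assuming a 47-bit hardware
	 * counter (ovfl_val = (1UL<<47)-1): each hardware overflow accounts
	 * for 2^47 events, so the software copy advances by 1 + ovfl_val = 2^47.
	 * A 64-bit overflow of the virtualized counter then shows up as
	 * unsigned wraparound, which is exactly the old_val > new_val test.
	 */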
	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;

	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char )i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}
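
	/*
	 * Summary of the default (no sampling module) policy applied above:
	 * if the user asked to be notified, we notify, block (for a blocking
	 * context), mask monitoring and defer the PMD reset to pfm_restart();
	 * otherwise the overflowed PMDs are silently reset right away and
	 * monitoring continues.
	 */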
	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer notification until state is changed (shorten spin window).
		 * the context is locked anyway, so the signal receiver would just
		 * spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;
sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
		smp_processor_id(),
		task ? task->pid : -1,
		pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by virtue of pfm_save_regs(), this one will disappear. If a system wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes: if any of bits 63-1 of PMC0 is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task->pid);
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(irq, arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu_no_resched();
	return IRQ_HANDLED;
}
/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)
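
/*
 * seq_file position encoding used below: *pos == 0 yields the global
 * header (PFM_PROC_SHOW_HEADER), and *pos == n for 1 <= n <= NR_CPUS
 * yields the statistics of CPU n-1, skipping CPUs that are offline.
 */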
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= NR_CPUS) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version : %u.%u\n"
		"model : %s\n"
		"fastctxsw : %s\n"
		"expert mode : %s\n"
		"ovfl_mask : 0x%lx\n"
		"PMU flags : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions : %u\n"
		"sys_sessions : %u\n"
		"sys_use_dbregs : %u\n"
		"ptrace_use_dbregs : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}
static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs : %lu\n"
		"CPU%-2d overflow cycles : %lu\n"
		"CPU%-2d overflow min : %lu\n"
		"CPU%-2d overflow max : %lu\n"
		"CPU%-2d smpl handler calls : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs : %lu\n"
		"CPU%-2d replay intrs : %lu\n"
		"CPU%-2d syst_wide : %d\n"
		"CPU%-2d dcr_pp : %d\n"
		"CPU%-2d exclude idle : %d\n"
		"CPU%-2d owner : %d\n"
		"CPU%-2d context : %p\n"
		"CPU%-2d activations : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr : 0x%lx\n"
			"CPU%-2d pmc0 : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u : 0x%lx\n"
				"CPU%-2d pmd%u : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

struct seq_operations pfm_seq_ops = {
	.start =	pfm_proc_start,
	.next =		pfm_proc_next,
	.stop =		pfm_proc_stop,
	.show =		pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task->pid));
}
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMD as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
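
/*
 * Note on the UP scheme: unlike the SMP path above, we do not flush the
 * PMDs here; the state stays live in the PMU. It is only pushed out
 * lazily, by pfm_lazy_save_regs() below, when another task needs the
 * PMU (see the owner check in the UP version of pfm_load_regs()).
 */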
static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];
	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
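
	/*
	 * Example of the activation test above: if this task last ran on this
	 * CPU when the CPU's activation counter was, say, 41 and nobody has
	 * touched the PMU since (counter still 41), the hardware still holds
	 * our state and only user-requested partial reloads are applied;
	 * otherwise everything is reloaded from the thread save area.
	 */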
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump the activation count for this PMU
	 */
	INC_ACTIVATION();

	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /*  !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. to interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff!
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
  5304. #endif /* CONFIG_SMP */
  5305. /*
  5306. * this function assumes monitoring is stopped
  5307. */
  5308. static void
  5309. pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
  5310. {
  5311. u64 pmc0;
  5312. unsigned long mask2, val, pmd_val, ovfl_val;
  5313. int i, can_access_pmu = 0;
  5314. int is_self;
  5315. /*
  5316. * is the caller the task being monitored (or which initiated the
  5317. * session for system wide measurements)
  5318. */
  5319. is_self = ctx->ctx_task == task ? 1 : 0;
  5320. /*
  5321. * can access PMU is task is the owner of the PMU state on the current CPU
  5322. * or if we are running on the CPU bound to the context in system-wide mode
  5323. * (that is not necessarily the task the context is attached to in this mode).
  5324. * In system-wide we always have can_access_pmu true because a task running on an
  5325. * invalid processor is flagged earlier in the call stack (see pfm_stop).
  5326. */
  5327. can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
  5328. if (can_access_pmu) {
  5329. /*
  5330. * Mark the PMU as not owned
  5331. * This will cause the interrupt handler to do nothing in case an overflow
  5332. * interrupt was in-flight
  5333. * This also guarantees that pmc0 will contain the final state
  5334. * It virtually gives us full control on overflow processing from that point
  5335. * on.
  5336. */
  5337. SET_PMU_OWNER(NULL, NULL);
  5338. DPRINT(("releasing ownership\n"));
  5339. /*
  5340. * read current overflow status:
  5341. *
  5342. * we are guaranteed to read the final stable state
  5343. */
  5344. ia64_srlz_d();
  5345. pmc0 = ia64_get_pmc(0); /* slow */
  5346. /*
  5347. * reset freeze bit, overflow status information destroyed
  5348. */
  5349. pfm_unfreeze_pmu();
  5350. } else {
  5351. pmc0 = ctx->th_pmcs[0];
  5352. /*
  5353. * clear whatever overflow status bits there were
  5354. */
  5355. ctx->th_pmcs[0] = 0;
  5356. }
  5357. ovfl_val = pmu_conf->ovfl_val;
  5358. /*
  5359. * we save all the used pmds
  5360. * we take care of overflows for counting PMDs
  5361. *
  5362. * XXX: sampling situation is not taken into account here
  5363. */
  5364. mask2 = ctx->ctx_used_pmds[0];
  5365. DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
  5366. for (i = 0; mask2; i++, mask2>>=1) {
  5367. /* skip non used pmds */
  5368. if ((mask2 & 0x1) == 0) continue;
  5369. /*
  5370. * can access PMU always true in system wide mode
  5371. */
  5372. val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
  5373. if (PMD_IS_COUNTING(i)) {
  5374. DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
  5375. task->pid,
  5376. i,
  5377. ctx->ctx_pmds[i].val,
  5378. val & ovfl_val));
  5379. /*
  5380. * we rebuild the full 64 bit value of the counter
  5381. */
  5382. val = ctx->ctx_pmds[i].val + (val & ovfl_val);
  5383. /*
  5384. * now everything is in ctx_pmds[] and we need
  5385. * to clear the saved context from save_regs() such that
  5386. * pfm_read_pmds() gets the correct value
  5387. */
  5388. pmd_val = 0UL;
  5389. /*
  5390. * take care of overflow inline
  5391. */
  5392. if (pmc0 & (1UL << i)) {
  5393. val += 1 + ovfl_val;
  5394. DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
  5395. }
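
			/*
			 * e.g. with a 47-bit counter: if the software copy held
			 * 3*2^47 and the hardware low bits read 0x10, the full
			 * 64-bit value is 3*2^47 + 0x10, plus another 2^47 if
			 * the pmc0 bit shows a still-pending overflow.
			 */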
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = IRQF_DISABLED,
	.name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}
void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static const struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
  5538. int __init
  5539. pfm_init(void)
  5540. {
  5541. unsigned int n, n_counters, i;
  5542. printk("perfmon: version %u.%u IRQ %u\n",
  5543. PFM_VERSION_MAJ,
  5544. PFM_VERSION_MIN,
  5545. IA64_PERFMON_VECTOR);
  5546. if (pfm_probe_pmu()) {
  5547. printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
  5548. local_cpu_data->family);
  5549. return -ENODEV;
  5550. }
  5551. /*
  5552. * compute the number of implemented PMD/PMC from the
  5553. * description tables
  5554. */
  5555. n = 0;
  5556. for (i=0; PMC_IS_LAST(i) == 0; i++) {
  5557. if (PMC_IS_IMPL(i) == 0) continue;
  5558. pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
  5559. n++;
  5560. }
  5561. pmu_conf->num_pmcs = n;
  5562. n = 0; n_counters = 0;
  5563. for (i=0; PMD_IS_LAST(i) == 0; i++) {
  5564. if (PMD_IS_IMPL(i) == 0) continue;
  5565. pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
  5566. n++;
  5567. if (PMD_IS_COUNTING(i)) n_counters++;
  5568. }
  5569. pmu_conf->num_pmds = n;
  5570. pmu_conf->num_counters = n_counters;
	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n",
			       pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n",
			       pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}
	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));
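	/*
	 * ffz() returns the index of the first zero bit, so with ovfl_val
	 * holding a mask of the low-order counter bits (e.g. an assumed
	 * (1UL << 47) - 1 for a 47-bit counter), ffz(ovfl_val) is the
	 * counter width printed above.
	 */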
	/* sanity check */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * install customized file operations for /proc/perfmon entry
	 */
	perfmon_dir->proc_fops = &pfm_proc_fops;

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for (i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}
__initcall(pfm_init);
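/*
 * __initcall() places pfm_init() in the kernel's init call table, so
 * it runs once during boot; per-CPU PMU setup is done separately in
 * pfm_init_percpu() below, which (per its comment) runs earlier.
 */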
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time = 1;

	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time = 0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
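/*
 * Background on the bits touched above: psr.up gates user-level
 * (per-task) monitoring, psr.pp gates privileged monitoring, and
 * cr.pmv selects the interrupt vector the PMU raises on counter
 * overflow; ia64_srlz_d() serializes the register write.
 */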
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		current->pid,
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		/* ctx may be NULL when no context is loaded on this CPU */
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}

	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);
	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else /* !CONFIG_PERFMON */

asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}

#endif /* CONFIG_PERFMON */