/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit2-3 : reserved
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 *	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0			/* not implemented at all */
#define PFM_REG_IMPL		0x1			/* register implemented */
#define PFM_REG_END		0x2			/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	/* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	/* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	/* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	/* PMD used as buffer */

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
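
/*
 * Illustrative note (not in the original source): the type bits compose,
 * so a PFM_REG_COUNTING register also passes the PFM_REG_MONITOR and
 * PFM_REG_IMPL tests. A hypothetical helper showing the intended order
 * of the classification macros above:
 */
#if 0	/* example only */
static unsigned int
pfm_example_classify_pmc(unsigned int i)
{
	if (!PMC_IS_IMPL(i))    return PFM_REG_NOTIMPL;
	if (PMC_IS_COUNTING(i)) return PFM_REG_COUNTING;	/* implies MONITOR */
	if (PMC_IS_MONITOR(i))  return PFM_REG_MONITOR;
	if (PMC_IS_CONTROL(i))  return PFM_REG_CONTROL;
	return PFM_REG_IMPL;
}
#endif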
#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)

#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
/*
 * context protection macros
 * in SMP:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPUs. interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
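
/*
 * Illustrative sketch (not in the original source): typical use of the
 * protection macros from system-call context, where both CPU concurrency
 * and the PMU overflow interrupt must be kept out:
 */
#if 0	/* example only */
static void
pfm_example_touch_ctx(pfm_context_t *ctx)
{
	unsigned long flags;

	PROTECT_CTX(ctx, flags);	/* local_irq_save + spin_lock on SMP */
	/* ... safely access ctx_pmds[], ctx_state, ... */
	UNPROTECT_CTX(ctx, flags);
}
#endif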
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will be blocked on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */

/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used            */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS];	/* software state for PMDS */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking

/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		  /* lock the structure */

	unsigned int		pfs_task_sessions;	  /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	  /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	  /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	  /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int	type;
	int		pm_pos;
	unsigned long	default_value;	/* power-on default value */
	unsigned long	reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t	read_check;
	pfm_reg_check_t	write_check;
	unsigned long	dep_pmd[4];
	unsigned long	dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int   num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int   num_pmds;	/* number of PMDS: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDS */

	char	       *pmu_name;	/* PMU family name */
	unsigned int   pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int   flags;		/* pmu specific flags */
	unsigned int   num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int   num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int   num_counters;	/* PMC/PMD counting pairs : computed at init time */
	int            (*probe)(void);	/* customized probe routine */
	unsigned int   use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;
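
/*
 * Illustrative sketch (not in the original source): how a PMU description
 * could be wired up. The name, family value and register tables below are
 * hypothetical; the real instances live in perfmon_itanium.h,
 * perfmon_mckinley.h and perfmon_generic.h included further down.
 */
#if 0	/* example only */
static int
pfm_fictional_probe(void)
{
	/* return 0 when the running CPU's PMU is recognized */
	return local_cpu_data->family == 0x1f ? 0 : -1;
}

static pmu_config_t pmu_conf_fictional = {
	.pmu_name	= "Fictional",
	.pmu_family	= 0x1f,			/* consulted only when .probe is NULL */
	.ovfl_val	= (1UL << 47) - 1,	/* assuming 47-bit hardware counters */
	.pmc_desc	= NULL,			/* would point to pfm_reg_desc_t tables */
	.pmd_desc	= NULL,
	.probe		= pfm_fictional_probe,
};
#endif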
/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;
	ibr_mask_reg_t ibr;
	dbr_mask_reg_t dbr;
} dbreg_t;

/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[]={
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{ 0, },
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;
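
/*
 * Note (not in the original source): the three-level tables above surface
 * the controls as /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,
 * expert_mode}; e.g. writing a non-zero value to "debug" enables the
 * DPRINT() output defined earlier.
 */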
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int pfm_flush(struct file *filp);

#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void*)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

static struct super_block *
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen, /* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}
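
/*
 * Note (not in the original source): pmc[0] is the PMU freeze/overflow
 * status register. pfm_freeze_pmu() sets bit 0 (the freeze bit); the
 * remaining bits latch which counters overflowed, which is why
 * PMC0_HAS_OVFL() above simply tests (cmp0 & ~0x1UL).
 */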
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
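
/*
 * Illustrative sketch (not in the original source): a 64-bit software
 * counter is split between the hardware PMD (low bits, below ovfl_val)
 * and ctx_pmds[i].val (the rest). Assuming the PMD reads back the value
 * just written, a write followed by a read round-trips:
 */
#if 0	/* example only */
static void
pfm_example_soft_counter(pfm_context_t *ctx, int i)
{
	unsigned long v = ~0UL - 100;	/* close to 64-bit wrap-around */

	pfm_write_soft_counter(ctx, i, v);	/* high bits to ctx, low bits to PMD */
	BUG_ON(pfm_read_soft_counter(ctx, i) != v);	/* recombined on read */
}
#endif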
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
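
/*
 * Illustrative sketch (not in the original source): the message queue is a
 * fixed-size ring of PFM_MAX_MSGS slots where one slot is always left
 * empty to distinguish full from empty. pfm_get_new_msg() reserves the
 * slot at the tail (NULL when full), pfm_get_next_msg() consumes from the
 * head:
 */
#if 0	/* example only */
static void
pfm_example_msgq(pfm_context_t *ctx)
{
	pfm_msg_t *msg = pfm_get_new_msg(ctx);	/* producer side */
	if (msg == NULL) return;		/* ring full: message dropped */
	msg->pfm_gen_msg.msg_type = PFM_MSG_END;

	msg = pfm_get_next_msg(ctx);		/* consumer side (read(2) path) */
	/* msg points at the oldest entry; its slot is implicitly recycled */
}
#endif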
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
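
/*
 * Note (not in the original source): the pages backing the sampling buffer
 * are marked reserved so that they can safely be mapped into user space
 * and are left alone by swap-out; pfm_rvfree() must clear the reservation
 * page by page before vfree() can release the area.
 */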
  689. static pfm_context_t *
  690. pfm_context_alloc(void)
  691. {
  692. pfm_context_t *ctx;
  693. /*
  694. * allocate context descriptor
  695. * must be able to free with interrupts disabled
  696. */
  697. ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
  698. if (ctx) {
  699. memset(ctx, 0, sizeof(pfm_context_t));
  700. DPRINT(("alloc ctx @%p\n", ctx));
  701. }
  702. return ctx;
  703. }
  704. static void
  705. pfm_context_free(pfm_context_t *ctx)
  706. {
  707. if (ctx) {
  708. DPRINT(("free ctx @%p\n", ctx));
  709. kfree(ctx);
  710. }
  711. }
  712. static void
  713. pfm_mask_monitoring(struct task_struct *task)
  714. {
  715. pfm_context_t *ctx = PFM_GET_CTX(task);
  716. struct thread_struct *th = &task->thread;
  717. unsigned long mask, val, ovfl_mask;
  718. int i;
  719. DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
  720. ovfl_mask = pmu_conf->ovfl_val;
  721. /*
  722. * monitoring can only be masked as a result of a valid
  723. * counter overflow. In UP, it means that the PMU still
  724. * has an owner. Note that the owner can be different
  725. * from the current task. However the PMU state belongs
  726. * to the owner.
  727. * In SMP, a valid overflow only happens when task is
  728. * current. Therefore if we come here, we know that
  729. * the PMU state belongs to the current task, therefore
  730. * we can access the live registers.
  731. *
  732. * So in both cases, the live register contains the owner's
  733. * state. We can ONLY touch the PMU registers and NOT the PSR.
  734. *
  735. * As a consequence to this call, the thread->pmds[] array
  736. * contains stale information which must be ignored
  737. * when context is reloaded AND monitoring is active (see
  738. * pfm_restart).
  739. */
  740. mask = ctx->ctx_used_pmds[0];
  741. for (i = 0; mask; i++, mask>>=1) {
  742. /* skip non used pmds */
  743. if ((mask & 0x1) == 0) continue;
  744. val = ia64_get_pmd(i);
  745. if (PMD_IS_COUNTING(i)) {
  746. /*
  747. * we rebuild the full 64 bit value of the counter
  748. */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
		th->pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}

/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		th->pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, th->pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because they could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
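
/*
 * snapshot the live PMD registers into the pmds[] array; the initial
 * ia64_srlz_d() serializes prior PMU updates so the values read below
 * are current.
 */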
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
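		/*
		 * For instance (illustrative width): with ovfl_val =
		 * (1UL<<47)-1, a soft value of 0x1800000000001 is split
		 * into thread->pmds[i] = 0x1 (low 47 bits, reloaded into
		 * the hardware PMD) while ctx_pmds[i].val keeps the
		 * upper part 0x1800000000000.
		 */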
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		thread->pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			thread->pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		thread->pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
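
/*
 * The pfm_buf_fmt_*() helpers below are thin wrappers around the
 * optional callbacks of a sampling buffer format: each one checks for
 * a NULL callback first, so a format only has to provide the hooks it
 * actually needs.
 */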
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;

	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need to check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
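
/*
 * Minimal registration sketch for a hypothetical sampling-format
 * module (field values are illustrative; fmt_name, a unique fmt_uuid
 * and fmt_handler are the mandatory pieces checked above):
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "example-format",
 *		.fmt_uuid    = { 0x01, ... },
 *		.fmt_handler = my_ovfl_handler,
 *	};
 *
 *	pfm_register_buffer_fmt(&my_fmt);   (-EBUSY on duplicate uuid)
 *	...
 *	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid);
 */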

extern void update_pal_halt_status(int);
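
/*
 * Session accounting rules enforced below:
 *  - at most one system-wide session per CPU (pfs_sys_session[cpu]),
 *  - system-wide and per-task sessions are mutually exclusive,
 *  - while any session exists, default_idle() is kept out of PAL_HALT
 *    via update_pal_halt_status().
 */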
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;
		pfm_sessions.pfs_sys_sessions++;
	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}

/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}
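
/*
 * Blocking read of one notification message. Classic sleep loop:
 * register on ctx_msgq_wait, then in TASK_INTERRUPTIBLE state test the
 * queue, drop the context lock and schedule() until a message arrives,
 * O_NONBLOCK is set, or a signal is pending. A monitoring tool simply
 * issues read(fd, &msg, sizeof(pfm_msg_t)) on the context fd.
 */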
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = ia64_task_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */

/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both case, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}

/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		up(&ctx->ctx_restart_sem);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 * 	- ok for explicit close
		 * 	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * UNLOADED means that the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnect file descriptor from context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}

static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static struct file_operations pfm_file_ops = {
	.llseek  = no_llseek,
	.read    = pfm_read,
	.write   = pfm_write,
	.poll    = pfm_poll,
	.ioctl   = pfm_ioctl,
	.open    = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync  = pfm_fasync,
	.release = pfm_close,
	.flush   = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};

static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_dentry) goto out;

	file->f_dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_dentry, inode);
	file->f_vfsmnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	files->fd[fd] = NULL;
	spin_unlock(&files->file_lock);

	if (file) put_filp(file);
	put_unused_fd(fd);
}
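
/*
 * map the kernel sampling buffer into a user VMA one page at a time:
 * vmalloc'ed memory is virtually but not physically contiguous, so
 * each page is translated with ia64_tpa() and handed individually to
 * remap_pfn_range().
 */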
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}

/*
 * allocate a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 * 	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc(), clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	memset(vma, 0, sizeof(*vma));

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm        = mm;
	vma->vm_flags     = VM_READ | VM_MAYREAD | VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma);
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}

/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	/* valid signal */

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	} else {
	}
	/* probably more to add here */

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task->pid));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}

static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switched restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitively slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}

/*
 * cannot attach if :
 * 	- kernel task
 * 	- task not owned by caller
 * 	- task incompatible with context mode
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task->pid));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task);

	/* more to come... */

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}

static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	struct file *filp;
	int ctx_flags;
	int ret;

	/* let's check the arguments first */
	ret = pfarg_is_sane(current, req);
	if (ret < 0) return ret;

	ctx_flags = req->ctx_flags;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	ret = pfm_alloc_fd(&filp);
	if (ret < 0) goto error_file;

	req->ctx_fd = ctx->ctx_fd = ret;

	/*
	 * attach context to file
	 */
	filp->private_data = ctx;

	/*
	 * does the user want to sample?
	 */
	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
		ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
		if (ret) goto buffer_error;
	}

	/*
	 * init context protection lock
	 */
	spin_lock_init(&ctx->ctx_lock);

	/*
	 * context is unloaded
	 */
	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * initialization of context's flags
	 */
	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1 : 0;
	/*
	 * will move to set properties
	 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1 : 0;
	 */

	/*
	 * init restart semaphore to locked
	 */
	sema_init(&ctx->ctx_restart_sem, 0);

	/*
	 * activation is used in SMP only
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * initialize notification message queue
	 */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	init_waitqueue_head(&ctx->ctx_msgq_wait);
	init_waitqueue_head(&ctx->ctx_zombieq);

	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
		ctx,
		ctx_flags,
		ctx->ctx_fl_system,
		ctx->ctx_fl_block,
		ctx->ctx_fl_excl_idle,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	/*
	 * initialize soft PMU state
	 */
	pfm_reset_pmu_state(ctx);

	return 0;

buffer_error:
	pfm_free_fd(ctx->ctx_fd, filp);

	if (ctx->ctx_buf_fmt) {
		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
	}
error_file:
	pfm_context_free(ctx);

error:
	return ret;
}
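
/*
 * compute the reset value for a counting PMD: start from the long or
 * short reset value and, if PFM_REGFL_RANDOM is set, subtract a
 * pseudo-random offset bounded by reg->mask. Counters count up toward
 * overflow, so reset values are negative offsets; randomizing them
 * varies the effective sampling period from one overflow to the next.
 */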
  2271. static inline unsigned long
  2272. pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
  2273. {
  2274. unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
  2275. unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
  2276. extern unsigned long carta_random32 (unsigned long seed);
  2277. if (reg->flags & PFM_REGFL_RANDOM) {
  2278. new_seed = carta_random32(old_seed);
  2279. val -= (old_seed & mask); /* counter values are negative numbers! */
  2280. if ((mask >> 32) != 0)
  2281. /* construct a full 64-bit random value: */
  2282. new_seed |= carta_random32(old_seed >> 32) << 32;
  2283. reg->seed = new_seed;
  2284. }
  2285. reg->lval = val;
  2286. return val;
  2287. }
  2288. static void
  2289. pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
  2290. {
  2291. unsigned long mask = ovfl_regs[0];
  2292. unsigned long reset_others = 0UL;
  2293. unsigned long val;
  2294. int i;
  2295. /*
  2296. * now restore reset value on sampling overflowed counters
  2297. */
  2298. mask >>= PMU_FIRST_COUNTER;
  2299. for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
  2300. if ((mask & 0x1UL) == 0UL) continue;
  2301. ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
  2302. reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
  2303. DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
  2304. }
  2305. /*
  2306. * Now take care of resetting the other registers
  2307. */
  2308. for(i = 0; reset_others; i++, reset_others >>= 1) {
  2309. if ((reset_others & 0x1) == 0) continue;
  2310. ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
  2311. DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
  2312. is_long_reset ? "long" : "short", i, val));
  2313. }
  2314. }
static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

	if (ctx->ctx_state == PFM_CTX_MASKED) {
		pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
		return;
	}

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1UL) == 0UL) continue;
		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
		pfm_write_soft_counter(ctx, i, val);
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for (i = 0; reset_others; i++, reset_others >>= 1) {
		if ((reset_others & 0x1) == 0) continue;
		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			     is_long_reset ? "long" : "short", i, val));
	}
	ia64_srlz_d();
}
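/*
 * Note on the two-pass reset above: the first pass re-arms every counter
 * whose bit is set in ovfl_regs[0]; while doing so it ORs together the
 * per-counter reset_pmds[0] masks. The second pass then resets that
 * dependent set. Illustrative case: if PMD4 overflowed and was programmed
 * with reset_pmds containing PMD5, both PMD4 and PMD5 receive fresh values.
 */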
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, pmc_pm;
	unsigned long smpl_pmds, reset_pmds, impl_pmds;
	unsigned int cnum, reg_flags, flags, pmc_type;
	int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
	int is_monitor, is_counting, state;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;
	impl_pmds = pmu_conf->impl_pmds[0];

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum       = req->reg_num;
		reg_flags  = req->reg_flags;
		value      = req->reg_value;
		smpl_pmds  = req->reg_smpl_pmds[0];
		reset_pmds = req->reg_reset_pmds[0];
		flags      = 0;

		if (cnum >= PMU_MAX_PMCS) {
			DPRINT(("pmc%u is invalid\n", cnum));
			goto error;
		}

		pmc_type    = pmu_conf->pmc_desc[cnum].type;
		pmc_pm      = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
		is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
		is_monitor  = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

		/*
		 * we reject all non-implemented PMCs as well
		 * as attempts to modify PMC[0-3] which are used
		 * as status registers by the PMU
		 */
		if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
			DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
			goto error;
		}
		wr_func = pmu_conf->pmc_desc[cnum].write_check;
		/*
		 * If the PMC is a monitor, then any value other than the default
		 * must respect the session type:
		 * - system-wide session: PMCx.pm=1 (privileged monitor)
		 * - per-task           : PMCx.pm=0 (user monitor)
		 */
		if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
			DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
				cnum,
				pmc_pm,
				is_system));
			goto error;
		}

		if (is_counting) {
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
			value |= 1 << PMU_PMC_OI;

			if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
				flags |= PFM_REGFL_OVFL_NOTIFY;
			}

			if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;

			/* verify validity of smpl_pmds */
			if ((smpl_pmds & impl_pmds) != smpl_pmds) {
				DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
				goto error;
			}

			/* verify validity of reset_pmds */
			if ((reset_pmds & impl_pmds) != reset_pmds) {
				DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
				goto error;
			}
		} else {
			if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
				DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
				goto error;
			}
			/* eventid on non-counting monitors is ignored */
		}
		/*
		 * execute write checker, if any
		 */
		if (likely(expert_mode == 0 && wr_func)) {
			ret = (*wr_func)(task, ctx, cnum, &value, regs);
			if (ret) goto error;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * update overflow information
		 */
		if (is_counting) {
			/*
			 * full flag update each time a register is programmed
			 */
			ctx->ctx_pmds[cnum].flags = flags;

			ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
			ctx->ctx_pmds[cnum].smpl_pmds[0]  = smpl_pmds;
			ctx->ctx_pmds[cnum].eventid       = req->reg_smpl_eventid;

			/*
			 * Mark all PMDS to be accessed as used.
			 *
			 * We do not keep track of PMC because we have to
			 * systematically restore ALL of them.
			 *
			 * We do not update the used_monitors mask, because
			 * if we have not programmed them, then they will be in
			 * a quiescent state, therefore we will not need to
			 * mask/restore them when the context is MASKED.
			 */
			CTX_USED_PMD(ctx, reset_pmds);
			CTX_USED_PMD(ctx, smpl_pmds);

			/*
			 * make sure we do not try to reset on
			 * restart because we have established new values
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}
		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
		 * possible leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep track of the monitor PMC that we are using.
		 * we save the value of the pmc in ctx_pmcs[] and if
		 * the monitoring is not stopped for the context we also
		 * place it in the saved state area so that it will be
		 * picked up later by the context switch code.
		 *
		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
		 *
		 * The value in thread->pmcs[] may be modified on overflow, i.e., when
		 * monitoring needs to be stopped.
		 */
		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);

		/*
		 * update context state
		 */
		ctx->ctx_pmcs[cnum] = value;

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmcs[cnum] = value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmc(cnum, value);
			}
#ifdef CONFIG_SMP
			else {
				/*
				 * per-task SMP only here
				 *
				 * we are guaranteed that the task is not running on the other CPU,
				 * so we indicate that this PMC will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
			}
#endif
		}
		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reload_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
			cnum,
			value,
			is_loaded,
			can_access_pmu,
			flags,
			ctx->ctx_all_pmcs[0],
			ctx->ctx_used_pmds[0],
			ctx->ctx_pmds[cnum].eventid,
			smpl_pmds,
			reset_pmds,
			ctx->ctx_reload_pmcs[0],
			ctx->ctx_used_monitors[0],
			ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make sure the changes are visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;
error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
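/*
 * Illustrative user-level call sequence for the function above (a sketch,
 * not part of this file; it assumes the usual perfmon user API from
 * <asm/perfmon.h>, where the PFM_WRITE_PMCS command dispatches to
 * pfm_write_pmcs() via pfm_cmd_tab[] further below):
 *
 *	pfarg_reg_t pc;
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;		// e.g. first counting monitor on Itanium
 *	pc.reg_value = ...;		// event encoding, privilege mask, etc.
 *	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;
 *	if (perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1) == -1)
 *		err(1, "PFM_WRITE_PMCS");
 */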
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMD when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;
	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;
			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;
			value = v;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64bits) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when the context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value & ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * so we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			"long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			cnum,
			value,
			is_loaded,
			can_access_pmu,
			hw_value,
			ctx->ctx_pmds[cnum].val,
			ctx->ctx_pmds[cnum].short_reset,
			ctx->ctx_pmds[cnum].long_reset,
			PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			ctx->ctx_pmds[cnum].seed,
			ctx->ctx_pmds[cnum].mask,
			ctx->ctx_used_pmds[0],
			ctx->ctx_pmds[cnum].reset_pmds[0],
			ctx->ctx_reload_pmds[0],
			ctx->ctx_all_pmds[0],
			ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
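/*
 * Worked example of the split above (illustrative; it assumes a PMU whose
 * counters are 47 bits wide, i.e. ovfl_mask = (1UL<<47)-1): writing the
 * 64-bit value 0xffffffffffff0000 stores the low 47 bits in the hardware
 * PMD (hw_value) and keeps the upper bits in ctx_pmds[cnum].val, so the
 * full 64-bit count can later be reconstructed as val + (hw & ovfl_mask)
 * in pfm_read_pmds().
 */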
/*
 * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
 * guaranteed to return consistent data to the user, it may simply be stale. It is not
 * trivial to handle the overflow while inside the call because you may end up in
 * some module sampling buffer code causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */
	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true when not self-monitoring only in UP
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */
	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval = ctx->ctx_pmds[cnum].val;
		lval = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live registers due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu) {
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? thread->pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}

		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
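/*
 * Read-side reconstruction, stated compactly (illustrative): for a counting
 * PMD the returned 64-bit value is
 *
 *	value = ctx_pmds[cnum].val + (hw_pmd & ovfl_mask)
 *
 * i.e. the software-maintained upper bits plus the live low-order hardware
 * bits, the exact inverse of the split performed in pfm_write_pmds().
 */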
int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);

int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		pfm_sessions.pfs_ptrace_use_dbregs,
		pfm_sessions.pfs_sys_use_dbregs,
		task->pid, ret));

	UNLOCK_PFS(flags);

	return ret;
}
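/*
 * Note on the accounting above: pfs_ptrace_use_dbregs and pfs_sys_use_dbregs
 * act as mutually exclusive reference counts taken under the pfm_sessions
 * lock. A ptrace() user can only grab the debug registers while no
 * system-wide perfmon session holds them, and pfm_write_ibr_dbr() performs
 * the symmetric check before a session claims them, so the two kinds of
 * users can never overlap.
 */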
/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
		case PFM_CTX_MASKED:
			break;
		case PFM_CTX_LOADED:
			if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
			/* fall through */
		case PFM_CTX_UNLOADED:
		case PFM_CTX_ZOMBIE:
			DPRINT(("invalid state=%d\n", state));
			return -EBUSY;
		default:
			DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
			return -EINVAL;
	}

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task->pid,
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task->pid));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));

				// cannot use pfm_stop_monitoring(task, regs);
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restart before this one is
		 * seen by the other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d] \n", task->pid));
		up(&ctx->ctx_restart_sem);
	} else {
		DPRINT(("[%d] armed exit trap\n", task->pid));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		pfm_set_task_notify(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
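/*
 * Illustrative consumer-side sequence for the function above (a sketch; it
 * assumes the usual perfmon user API where the PFM_RESTART command reaches
 * pfm_restart() through pfm_cmd_tab[]): a monitoring tool reads an overflow
 * notification from the context file descriptor, processes the sampling
 * buffer, then re-arms monitoring:
 *
 *	read(fd, &msg, sizeof(msg));		// pfm_msg_t overflow message
 *	... consume samples ...
 *	perfmonctl(fd, PFM_RESTART, NULL, 0);
 */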
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}
/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the debug registers when
	 * the task is the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */
	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourselves as a user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				rnum, dbreg.val, mode, i, count));
			goto abort_mission;
		}

		/*
		 * make sure we do not install an enabled breakpoint
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMCs, can only be modified
		 * by a kernel call. Moreover, perfmon() accesses to those
		 * registers are centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}
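/*
 * Layout note for the loop above (illustrative): IA-64 breakpoint registers
 * come in pairs, where the even-numbered ibr/dbr holds the address and the
 * odd-numbered one holds the mask and control bits. That is why only odd
 * rnum values have their enable bits (ibr.ibr_x, or dbr.dbr_r/dbr.dbr_w)
 * forcibly cleared: this call may stage breakpoint state but never arm it.
 */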
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}

int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);

int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		PFM_CTX_TASK(ctx)->pid,
		state,
		is_system));

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task mode
	 */

	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = ia64_task_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task->pid));
	}
	return 0;
}
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-process mode
	 */

	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = ia64_task_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
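/*
 * Summary of the enable bits manipulated by pfm_start()/pfm_stop() above:
 *
 *	session type   | kernel-level enable     | user-level enable
 *	---------------+-------------------------+---------------------------
 *	system-wide    | psr.pp + dcr.pp         | psr.pp in caller's pt_regs
 *	per-task       | psr.up (or saved copy)  | psr.up in task's pt_regs
 *
 * For a per-task session on a task that is not current, only the saved psr
 * copy is touched; the hardware picks it up at the next reschedule.
 */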
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			break;
		}
	} while_each_thread (g, t);

	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	struct pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();
	ret = -EBUSY;
	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = thread->pmcs;
	pmds_source = thread->pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task->pid));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMD from ctx to PMU (as opposed to thread state)
		 * restore all PMC from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = ia64_task_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task, there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task = NULL;
			}
		}
	}
	return ret;
}
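/*
 * Illustrative attach sequence for the function above (a sketch; it assumes
 * the usual perfmon user API where the PFM_LOAD_CONTEXT command reaches
 * pfm_context_load() via pfm_cmd_tab[]). The target task must already be
 * stopped, as enforced by pfm_check_task_state():
 *
 *	pfarg_load_t ld;
 *	memset(&ld, 0, sizeof(ld));
 *	ld.load_pid = pid;			// task to monitor
 *	if (perfmonctl(fd, PFM_LOAD_CONTEXT, &ld, 1) == -1)
 *		err(1, "PFM_LOAD_CONTEXT");
 *	perfmonctl(fd, PFM_START, NULL, 0);	// begin counting
 */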
/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread()
 * which also grabs the context lock and would therefore be blocked
 * until we are here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);

static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : ia64_task_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task->pid));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task->pid));

	return 0;
}
/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = ia64_task_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));

	state = ctx->ctx_state;
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * only comes to this function if pfm_context is not NULL, i.e., cannot
			 * be in unloaded state
			 */
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
			break;
		case PFM_CTX_LOADED:
		case PFM_CTX_MASKED:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			DPRINT(("ctx unloaded for current state was %d\n", state));

			pfm_end_notify_user(ctx);
			break;
		case PFM_CTX_ZOMBIE:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			free_ok = 1;
			break;
		default:
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
			break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task->pid,
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If the user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
			DPRINT(("[%d] task not in stopped state\n", task->pid));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is a danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}

	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = (pfm_context_t *)file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
		fput(file);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (args_k) kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
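/*
 * Editor's sketch of a minimal user-level calling sequence against
 * sys_perfmonctl() above (via the perfmonctl(2) wrapper). Error
 * handling is omitted and field names follow the pfarg_* structures
 * referenced by the command table; treat this as a hedged example,
 * not a reference implementation:
 *
 *	pfarg_context_t c;
 *	pfarg_load_t    l;
 *	memset(&c, 0, sizeof(c));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	// fd returned in c.ctx_fd
 *	memset(&l, 0, sizeof(l));
 *	l.load_pid = getpid();				// self-monitoring
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &l, 1);
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);
 *	// ... workload ...
 *	perfmonctl(c.ctx_fd, PFM_STOP, NULL, 0);
 */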
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", current->pid));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
 * call may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (PUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = ia64_task_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because of down_interruptible()
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = down_interruptible(&ctx->ctx_restart_sem);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered interrupts mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values, in which case ovfl_regs
	 * is reset for those new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}

static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}

static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
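/*
 * Editor's sketch of the consumer side of the two notification
 * routines above: messages queued by pfm_ovfl_notify_user() and
 * pfm_end_notify_user() are drained from user level by read()ing the
 * context file descriptor ("ctx_fd" below is hypothetical):
 *
 *	pfm_msg_t msg;
 *	if (read(ctx_fd, &msg, sizeof(msg)) == sizeof(msg)
 *	    && msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL) {
 *		// process msg.pfm_ovfl_msg.msg_ovfl_pmds[0] ...
 *		perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);	// unblock the task
 *	}
 */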
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t	ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
		     pmc0,
		     task ? task->pid: -1,
		     (regs ? regs->cr_iip : 0),
		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		     ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;

	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char )i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer until state is changed (shorten spin window). the context is locked
		 * anyway, so the signal receiver would just spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
			smp_processor_id(),
			task ? task->pid : -1,
			pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by virtue of pfm_save_regs(), this one will disappear. If a system-wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
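/*
 * Editor's note on the counter arithmetic in the update loop above:
 * hardware PMDs implement only the bits covered by pmu_conf->ovfl_val,
 * so each hardware wrap contributes (ovfl_val + 1) to the 64-bit
 * software value kept in ctx_pmds[].val. A self-contained sketch with
 * a hypothetical counter width:
 *
 *	unsigned long ovfl_val = (1UL << 47) - 1;	// 47 implemented bits
 *	unsigned long soft = 0UL;
 *	soft += 1 + ovfl_val;				// one wrap adds 2^47
 *	// full 64-bit count = soft + (hardware pmd & ovfl_val),
 *	// as rebuilt later in pfm_flush_pmds()/pfm_read_pmds()
 */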
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task->pid);
	pfm_unfreeze_pmu();
	return -1;
}

static irqreturn_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(irq, arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu_no_resched();
	return IRQ_HANDLED;
}
/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= NR_CPUS) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version : %u.%u\n"
		"model : %s\n"
		"fastctxsw : %s\n"
		"expert mode : %s\n"
		"ovfl_mask : 0x%lx\n"
		"PMU flags : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions : %u\n"
		"sys_sessions : %u\n"
		"sys_use_dbregs : %u\n"
		"ptrace_use_dbregs : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs : %lu\n"
		"CPU%-2d overflow cycles : %lu\n"
		"CPU%-2d overflow min : %lu\n"
		"CPU%-2d overflow max : %lu\n"
		"CPU%-2d smpl handler calls : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs : %lu\n"
		"CPU%-2d replay intrs : %lu\n"
		"CPU%-2d syst_wide : %d\n"
		"CPU%-2d dcr_pp : %d\n"
		"CPU%-2d exclude idle : %d\n"
		"CPU%-2d owner : %d\n"
		"CPU%-2d context : %p\n"
		"CPU%-2d activations : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr : 0x%lx\n"
			"CPU%-2d pmc0 : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0;  i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u : 0x%lx\n"
				"CPU%-2d pmd%u : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

struct seq_operations pfm_seq_ops = {
	.start =	pfm_proc_start,
	.next =		pfm_proc_next,
	.stop =		pfm_proc_stop,
	.show =		pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
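/*
 * Usage note (editor's addition): the seq_file plumbing above backs
 * the read-only /proc/perfmon entry, so the statistics are inspected
 * from a shell with e.g.
 *
 *	$ cat /proc/perfmon
 *
 * The output starts with the global header from pfm_proc_show_header()
 * followed by one block per online CPU from pfm_proc_show().
 */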
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = ia64_task_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task->pid));
}

/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;
	t = &task->thread;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = ia64_task_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);
		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMD as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);
	t   = &task->thread;

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * we can now unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	t = &task->thread;
	/*
	 * possible on unload
	 */
	if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = ia64_task_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();
		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /*  !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	struct thread_struct *t;
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	t     = &task->thread;
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. to interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff !
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(t->pmds, pmd_mask);
	pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();

		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * can access PMU if task is the owner of the PMU state on the current CPU
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (that is not necessarily the task the context is attached to in this mode).
	 * In system-wide we always have can_access_pmu true because a task running on an
	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight
		 * This also guarantees that pmc0 will contain the final state
		 * It virtually gives us full control on overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = task->thread.pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		task->thread.pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip unused pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task->pid,
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));

		if (is_self) task->thread.pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
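/*
 * Worked example for the rebuild above (editor's addition, assuming a
 * hypothetical 47-bit counter, i.e. ovfl_val = 2^47 - 1): if
 * ctx_pmds[i].val holds 3*(ovfl_val+1) from three recorded wraps and
 * the live pmd reads 0x1234, the flushed 64-bit value is
 * 3*2^47 + 0x1234; a pending overflow bit for i in pmc0 adds one more
 * (ovfl_val+1).
 */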
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = SA_INTERRUPT,
	.name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = ia64_task_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = ia64_task_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}

int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
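/*
 * Editor's sketch of a (hypothetical) in-kernel client of the two
 * exports above; the handler signature is assumed from
 * pfm_intr_handler_desc_t and the body is elided:
 *
 *	static int my_pmu_intr(int irq, void *arg, struct pt_regs *regs)
 *	{
 *		// inspect PMU state; must leave the PMU unfrozen
 *		return 0;
 *	}
 *	static pfm_intr_handler_desc_t my_desc = { .handler = my_pmu_intr };
 *
 *	if (pfm_install_alt_pmu_interrupt(&my_desc) == 0) {
 *		// ... drive the PMU directly ...
 *		pfm_remove_alt_pmu_interrupt(&my_desc);
 *	}
 */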
  5475. /*
  5476. * perfmon initialization routine, called from the initcall() table
  5477. */
  5478. static int init_pfm_fs(void);
  5479. static int __init
  5480. pfm_probe_pmu(void)
  5481. {
  5482. pmu_config_t **p;
  5483. int family;
  5484. family = local_cpu_data->family;
  5485. p = pmu_confs;
  5486. while(*p) {
  5487. if ((*p)->probe) {
  5488. if ((*p)->probe() == 0) goto found;
  5489. } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
  5490. goto found;
  5491. }
  5492. p++;
  5493. }
  5494. return -1;
  5495. found:
  5496. pmu_conf = *p;
  5497. return 0;
  5498. }
  5499. static struct file_operations pfm_proc_fops = {
  5500. .open = pfm_proc_open,
  5501. .read = seq_read,
  5502. .llseek = seq_lseek,
  5503. .release = seq_release,
  5504. };
  5505. int __init
  5506. pfm_init(void)
  5507. {
  5508. unsigned int n, n_counters, i;
  5509. printk("perfmon: version %u.%u IRQ %u\n",
  5510. PFM_VERSION_MAJ,
  5511. PFM_VERSION_MIN,
  5512. IA64_PERFMON_VECTOR);
  5513. if (pfm_probe_pmu()) {
  5514. printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
  5515. local_cpu_data->family);
  5516. return -ENODEV;
  5517. }
  5518. /*
  5519. * compute the number of implemented PMD/PMC from the
  5520. * description tables
  5521. */
  5522. n = 0;
  5523. for (i=0; PMC_IS_LAST(i) == 0; i++) {
  5524. if (PMC_IS_IMPL(i) == 0) continue;
  5525. pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
  5526. n++;
  5527. }
  5528. pmu_conf->num_pmcs = n;
  5529. n = 0; n_counters = 0;
  5530. for (i=0; PMD_IS_LAST(i) == 0; i++) {
  5531. if (PMD_IS_IMPL(i) == 0) continue;
  5532. pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
  5533. n++;
  5534. if (PMD_IS_COUNTING(i)) n_counters++;
  5535. }
  5536. pmu_conf->num_pmds = n;
  5537. pmu_conf->num_counters = n_counters;
  5538. /*
  5539. * sanity checks on the number of debug registers
  5540. */
  5541. if (pmu_conf->use_rr_dbregs) {
  5542. if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
  5543. printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
  5544. pmu_conf = NULL;
  5545. return -1;
  5546. }
  5547. if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
  5548. printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
  5549. pmu_conf = NULL;
  5550. return -1;
  5551. }
  5552. }
	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * install customized file operations for /proc/perfmon entry
	 */
	perfmon_dir->proc_fops = &pfm_proc_fops;

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}
__initcall(pfm_init);

/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (smp_processor_id() == 0)
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct thread_struct *t;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = ia64_task_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		current->pid,
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	t = &current->thread;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
	}
	if (ctx) {
		/* labels match the values actually printed (msgq head/tail) */
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else /* !CONFIG_PERFMON */

asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}

#endif /* CONFIG_PERFMON */