/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit2-3 : reserved
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 *	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0			 /* not implemented at all */
#define PFM_REG_IMPL		0x1			 /* register implemented */
#define PFM_REG_END		0x2			 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	 /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	 /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	 /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	 /* PMD used as buffer */
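
/*
 * Illustrative expansions of the type bitmask above (values derived from
 * the definitions, shown only as a reading aid): PFM_REG_MONITOR = 0x11,
 * PFM_REG_COUNTING = 0x31, PFM_REG_CONTROL = 0x41, PFM_REG_CONFIG = 0x81,
 * PFM_REG_BUFFER = 0xc1. Because the encodings nest (COUNTING contains the
 * MONITOR and IMPL bits), the tests below use the (type & T) == T pattern
 * rather than a simple single-bit test.
 */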
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)

#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))

/*
 * context protection macros
 * in SMP:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPUs. interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
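
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * pair these macros around any access to shared context state, e.g.:
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	ctx->ctx_state = PFM_CTX_LOADED;
 *	UNPROTECT_CTX(ctx, flags);
 *
 * The flags variable carries the saved interrupt state between the lock
 * and the matching unlock.
 */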
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
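
/*
 * Note on usage (added as a reading aid): DPRINT callers need double
 * parentheses because the macro hands its argument straight to printk:
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 *
 * The inner parentheses make the format string and its arguments a single
 * macro argument, which "printk a" then expands into a valid parameter list.
 */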
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */

/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */

	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking

/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */
	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int	num_pmds;	/* number of PMDS: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDS */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
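
/*
 * Illustrative probe sketch (not from the original file): a PMU description
 * may supply a probe routine that returns 0 when the host PMU is recognized,
 * for example by checking the CPU family:
 *
 *	static int
 *	pfm_example_probe(void)
 *	{
 *		// hypothetical check: accept only CPU family 0x20
 *		return local_cpu_data->family == 0x20 ? 0 : -1;
 *	}
 *
 * When no probe routine is supplied, the generic pmu_family match described
 * above is used instead.
 */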
/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long	val;
	ibr_mask_reg_t	ibr;
	dbr_mask_reg_t	dbr;
} dbreg_t;

/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

/*
 * perfmon internal variables
 */
static pfm_stats_t	pfm_stats[NR_CPUS];
static pfm_session_t	pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "perfmon",
		.mode		= 0755,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.ctl_name	= CTL_KERN,
		.procname	= "kernel",
		.mode		= 0755,
		.child		= pfm_sysctl_dir,
	},
	{}
};
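
/*
 * Reading aid (not in the original file): once registered, the three nested
 * tables above surface as /proc/sys/kernel/perfmon/{debug,debug_ovfl,
 * fastctxsw,expert_mode}, so debug output can be toggled at run time, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 */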
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_PERFMON_WORK, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_PERFMON_WORK);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

static int
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
	     struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}

static struct file_system_type pfm_fs_type = {
	.name    = "pfmfs",
	.get_sb  = pfmfs_get_sb,
	.kill_sb = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
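
/*
 * Worked example (added as a reading aid, values hypothetical): with a
 * 47-bit hardware counter, ovfl_val is 0x00007fffffffffff. Writing the
 * 64-bit value 0x0000900000000005 stores 0x0000800000000000 in
 * ctx_pmds[i].val (the software-maintained upper bits) and
 * 0x0000100000000005 in the hardware PMD. A later read adds the two parts
 * back together, which is how the 64-bit virtual counter is reconstructed
 * from a narrower hardware register.
 */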
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
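
/*
 * Reading aid (not in the original file): ctx_msgq is a classic fixed-size
 * ring buffer. head == tail means empty, and one slot is always left unused
 * to distinguish full from empty, so at most PFM_MAX_MSGS-1 (31) messages
 * can be queued before pfm_get_new_msg() returns NULL.
 */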
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
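
/*
 * Design note (added as a reading aid): pfm_rvmalloc() marks every page of
 * the vmalloc'ed sampling buffer reserved via pfm_reserve_page(). This is
 * the usual idiom for kernel buffers that are later mapped into user space
 * page by page, and pfm_rvfree() must clear the reserved bit again before
 * vfree() so the pages can actually be returned to the allocator.
 */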
  735. static pfm_context_t *
  736. pfm_context_alloc(void)
  737. {
  738. pfm_context_t *ctx;
  739. /*
  740. * allocate context descriptor
  741. * must be able to free with interrupts disabled
  742. */
  743. ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
  744. if (ctx) {
  745. DPRINT(("alloc ctx @%p\n", ctx));
  746. }
  747. return ctx;
  748. }
  749. static void
  750. pfm_context_free(pfm_context_t *ctx)
  751. {
  752. if (ctx) {
  753. DPRINT(("free ctx @%p\n", ctx));
  754. kfree(ctx);
  755. }
  756. }
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence of this call, the ctx->th_pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
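
/*
 * Illustrative sketch (not part of the original code): the loop idiom
 * used throughout this file walks a register bitmask by shifting it
 * right one bit per iteration, so the loop terminates as soon as no
 * bits remain set, regardless of how wide the register file is. The
 * mask value below is hypothetical.
 */
#if 0
static void pfm_bitmask_walk_example(void)
{
	unsigned long mask = 0x130UL;	/* hypothetical: pmds 4, 5 and 8 in use */
	int i;

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		printk("pmd%d is in use\n", i);	/* prints 4, 5 and 8 */
	}
}
#endif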
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
			task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;

	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need to check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
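
/*
 * Illustrative sketch (not part of the original code): a minimal module
 * registering a custom sampling format through the exported
 * pfm_register_buffer_fmt()/pfm_unregister_buffer_fmt() pair above.
 * The UUID, name and handler are hypothetical, and the handler
 * signature is an assumption; only fmt_name and fmt_handler are
 * mandatory, as enforced by the sanity checks in
 * pfm_register_buffer_fmt().
 */
#if 0
static int
example_fmt_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
		    struct pt_regs *regs, unsigned long stamp)
{
	/* consume one overflow notification; 0 means success */
	return 0;
}

static pfm_buffer_fmt_t example_fmt = {
	.fmt_name    = "example-sampling-format",
	.fmt_uuid    = { 0x0e, 0x78, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab,
			 0xcd, 0xef, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, /* hypothetical */
	.fmt_handler = example_fmt_handler,
};

static int __init example_fmt_init(void)
{
	return pfm_register_buffer_fmt(&example_fmt);
}

static void __exit example_fmt_exit(void)
{
	pfm_unregister_buffer_fmt(example_fmt.fmt_uuid);
}
#endif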
extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;
	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
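
/*
 * Illustrative sketch (not part of the original code): every successful
 * pfm_reserve_session() must be balanced by pfm_unreserve_session(),
 * and system-wide vs. per-task sessions are mutually exclusive, so the
 * second reservation below fails with -EBUSY.
 */
#if 0
static void pfm_session_example(pfm_context_t *ctx)
{
	/* per-task session: is_syswide=0, cpu argument unused */
	if (pfm_reserve_session(current, 0, 0)) return;

	/* a system-wide session now conflicts: returns -EBUSY */
	WARN_ON(pfm_reserve_session(current, 1, smp_processor_id()) != -EBUSY);

	pfm_unreserve_session(ctx, 0, 0);
}
#endif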
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for(;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
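
/*
 * Illustrative sketch (not part of the original code): userland consumes
 * notification messages by read(2)-ing whole pfm_msg_t records from the
 * context file descriptor, which is exactly what the size check in
 * pfm_read() enforces. This is userspace-side pseudo-usage, not kernel
 * code; the helper name is hypothetical.
 */
#if 0
void drain_messages(int pfm_fd)
{
	pfm_msg_t msg;
	ssize_t n;

	for (;;) {
		n = read(pfm_fd, &msg, sizeof(msg));	/* shorter reads get -EINVAL */
		if (n != sizeof(msg)) break;		/* -EAGAIN if O_NONBLOCK and empty */
		/* dispatch on msg.pfm_gen_msg.msg_type here */
	}
}
#endif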
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 * 	- ok for explicit close
		 * 	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. Should there be a subsequent PMU overflow originally
	 * meant for sampling, it will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * UNLOADED means that the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnecting the file descriptor from the context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static const struct file_operations pfm_file_ops = {
	.llseek  = no_llseek,
	.read    = pfm_read,
	.write   = pfm_write,
	.poll    = pfm_poll,
	.ioctl   = pfm_ioctl,
	.open    = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync  = pfm_fasync,
	.release = pfm_close,
	.flush   = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};

static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_path.dentry) goto out;

	file->f_path.dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_path.dentry, inode);
	file->f_path.mnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);

	if (file)
		put_filp(file);
	put_unused_fd(fd);
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}

/*
 * allocate a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 * 	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc() clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm        = mm;
	vma->vm_file      = filp;
	vma->vm_flags     = VM_READ | VM_MAYREAD | VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end   = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	get_file(filp);

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
			vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
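
/*
 * Illustrative sketch (not part of the original code): the buffer set up
 * above is a vmalloc'ed kernel buffer remapped page by page into one
 * user VMA, so teardown has two independent halves: removing the user
 * mapping and freeing the kernel pages, mirroring the error path of
 * pfm_smpl_buffer_alloc(). The 64KB size is hypothetical.
 */
#if 0
static void pfm_smpl_buffer_example(struct file *filp, pfm_context_t *ctx)
{
	void *uaddr = NULL;

	if (pfm_smpl_buffer_alloc(current, filp, ctx, 64*1024, &uaddr)) return;

	/* ... sampling runs, userland reads the buffer at uaddr ... */

	/* interrupts must be enabled for both halves of the teardown */
	pfm_remove_smpl_mapping(current, ctx->ctx_smpl_vaddr, ctx->ctx_smpl_size);
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
}
#endif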
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	/* valid signal */

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is zeroed at allocation
	 */

	/*
	 * On context switched restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitively slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
/*
 * cannot attach if :
 * 	- kernel task
 * 	- task not owned by caller
 * 	- task incompatible with context mode
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task);

	/* more to come... */

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	struct file *filp;
	int ctx_flags;
	int ret;

	/* let's check the arguments first */
	ret = pfarg_is_sane(current, req);
	if (ret < 0) return ret;

	ctx_flags = req->ctx_flags;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	ret = pfm_alloc_fd(&filp);
	if (ret < 0) goto error_file;

	req->ctx_fd = ctx->ctx_fd = ret;

	/*
	 * attach context to file
	 */
	filp->private_data = ctx;

	/*
	 * does the user want to sample?
	 */
	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
		ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
		if (ret) goto buffer_error;
	}

	/*
	 * init context protection lock
	 */
	spin_lock_init(&ctx->ctx_lock);

	/*
	 * context is unloaded
	 */
	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * initialization of context's flags
	 */
	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
	/*
	 * will move to set properties
	 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
	 */

	/*
	 * init restart semaphore to locked
	 */
	init_completion(&ctx->ctx_restart_done);

	/*
	 * activation is used in SMP only
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * initialize notification message queue
	 */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	init_waitqueue_head(&ctx->ctx_msgq_wait);
	init_waitqueue_head(&ctx->ctx_zombieq);

	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
		ctx,
		ctx_flags,
		ctx->ctx_fl_system,
		ctx->ctx_fl_block,
		ctx->ctx_fl_excl_idle,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	/*
	 * initialize soft PMU state
	 */
	pfm_reset_pmu_state(ctx);

	return 0;

buffer_error:
	pfm_free_fd(ctx->ctx_fd, filp);

	if (ctx->ctx_buf_fmt) {
		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
	}
error_file:
	pfm_context_free(ctx);

error:
	return ret;
}
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}
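
/*
 * Illustrative sketch (not part of the original code): the counters
 * count upward and interrupt on overflow, so "reset to value v" means
 * loading a negative distance, as the comment above notes. With the
 * hypothetical numbers below, the counter overflows every 100000
 * events.
 */
#if 0
static void pfm_reset_value_example(pfm_counter_t *reg)
{
	reg->long_reset  = -100000UL;	/* counter values are negative numbers */
	reg->short_reset = -100000UL;
	reg->flags      &= ~PFM_REGFL_RANDOM;	/* deterministic period */

	/* pfm_new_counter_value(reg, 1) now simply returns -100000 */
}
#endif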
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others        |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			is_long_reset ? "long" : "short", i, val));
	}
}

static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

	if (ctx->ctx_state == PFM_CTX_MASKED) {
		pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
		return;
	}

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));

		pfm_write_soft_counter(ctx, i, val);
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}

		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			is_long_reset ? "long" : "short", i, val));
	}

	ia64_srlz_d();
}
  2395. static int
  2396. pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  2397. {
  2398. struct task_struct *task;
  2399. pfarg_reg_t *req = (pfarg_reg_t *)arg;
  2400. unsigned long value, pmc_pm;
  2401. unsigned long smpl_pmds, reset_pmds, impl_pmds;
  2402. unsigned int cnum, reg_flags, flags, pmc_type;
  2403. int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
  2404. int is_monitor, is_counting, state;
  2405. int ret = -EINVAL;
  2406. pfm_reg_check_t wr_func;
  2407. #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
  2408. state = ctx->ctx_state;
  2409. is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
  2410. is_system = ctx->ctx_fl_system;
  2411. task = ctx->ctx_task;
  2412. impl_pmds = pmu_conf->impl_pmds[0];
  2413. if (state == PFM_CTX_ZOMBIE) return -EINVAL;
  2414. if (is_loaded) {
  2415. /*
  2416. * In system wide and when the context is loaded, access can only happen
  2417. * when the caller is running on the CPU being monitored by the session.
  2418. * It does not have to be the owner (ctx_task) of the context per se.
  2419. */
  2420. if (is_system && ctx->ctx_cpu != smp_processor_id()) {
  2421. DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
  2422. return -EBUSY;
  2423. }
  2424. can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
  2425. }
  2426. expert_mode = pfm_sysctl.expert_mode;
  2427. for (i = 0; i < count; i++, req++) {
  2428. cnum = req->reg_num;
  2429. reg_flags = req->reg_flags;
  2430. value = req->reg_value;
  2431. smpl_pmds = req->reg_smpl_pmds[0];
  2432. reset_pmds = req->reg_reset_pmds[0];
  2433. flags = 0;
  2434. if (cnum >= PMU_MAX_PMCS) {
  2435. DPRINT(("pmc%u is invalid\n", cnum));
  2436. goto error;
  2437. }
  2438. pmc_type = pmu_conf->pmc_desc[cnum].type;
  2439. pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
  2440. is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
  2441. is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
  2442. /*
  2443. * we reject all non implemented PMC as well
  2444. * as attempts to modify PMC[0-3] which are used
  2445. * as status registers by the PMU
  2446. */
  2447. if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
  2448. DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
  2449. goto error;
  2450. }
  2451. wr_func = pmu_conf->pmc_desc[cnum].write_check;
  2452. /*
  2453. * If the PMC is a monitor, then if the value is not the default:
  2454. * - system-wide session: PMCx.pm=1 (privileged monitor)
  2455. * - per-task : PMCx.pm=0 (user monitor)
  2456. */
  2457. if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
  2458. DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
  2459. cnum,
  2460. pmc_pm,
  2461. is_system));
  2462. goto error;
  2463. }
  2464. if (is_counting) {
  2465. /*
  2466. * enforce generation of overflow interrupt. Necessary on all
  2467. * CPUs.
  2468. */
  2469. value |= 1 << PMU_PMC_OI;
  2470. if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
  2471. flags |= PFM_REGFL_OVFL_NOTIFY;
  2472. }
  2473. if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
  2474. /* verify validity of smpl_pmds */
  2475. if ((smpl_pmds & impl_pmds) != smpl_pmds) {
  2476. DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
  2477. goto error;
  2478. }
  2479. /* verify validity of reset_pmds */
  2480. if ((reset_pmds & impl_pmds) != reset_pmds) {
  2481. DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
  2482. goto error;
  2483. }
  2484. } else {
  2485. if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
  2486. DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
  2487. goto error;
  2488. }
  2489. /* eventid on non-counting monitors are ignored */
  2490. }
  2491. /*
  2492. * execute write checker, if any
  2493. */
  2494. if (likely(expert_mode == 0 && wr_func)) {
  2495. ret = (*wr_func)(task, ctx, cnum, &value, regs);
  2496. if (ret) goto error;
  2497. ret = -EINVAL;
  2498. }
  2499. /*
  2500. * no error on this register
  2501. */
  2502. PFM_REG_RETFLAG_SET(req->reg_flags, 0);
  2503. /*
  2504. * Now we commit the changes to the software state
  2505. */
  2506. /*
  2507. * update overflow information
  2508. */
  2509. if (is_counting) {
  2510. /*
  2511. * full flag update each time a register is programmed
  2512. */
  2513. ctx->ctx_pmds[cnum].flags = flags;
  2514. ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
  2515. ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
  2516. ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
  2517. /*
  2518. * Mark all PMDS to be accessed as used.
  2519. *
  2520. * We do not keep track of PMC because we have to
  2521. * systematically restore ALL of them.
  2522. *
  2523. * We do not update the used_monitors mask, because
  2524. * if we have not programmed them, then will be in
  2525. * a quiescent state, therefore we will not need to
  2526. * mask/restore then when context is MASKED.
  2527. */
  2528. CTX_USED_PMD(ctx, reset_pmds);
  2529. CTX_USED_PMD(ctx, smpl_pmds);
  2530. /*
  2531. * make sure we do not try to reset on
  2532. * restart because we have established new values
  2533. */
  2534. if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
  2535. }
  2536. /*
  2537. * Needed in case the user does not initialize the equivalent
  2538. * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
  2539. * possible leak here.
  2540. */
  2541. CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
  2542. /*
  2543. * keep track of the monitor PMC that we are using.
  2544. * we save the value of the pmc in ctx_pmcs[] and if
  2545. * the monitoring is not stopped for the context we also
  2546. * place it in the saved state area so that it will be
  2547. * picked up later by the context switch code.
  2548. *
  2549. * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
  2550. *
  2551. * The value in th_pmcs[] may be modified on overflow, i.e., when
  2552. * monitoring needs to be stopped.
  2553. */
  2554. if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
  2555. /*
  2556. * update context state
  2557. */
  2558. ctx->ctx_pmcs[cnum] = value;
  2559. if (is_loaded) {
  2560. /*
  2561. * write thread state
  2562. */
  2563. if (is_system == 0) ctx->th_pmcs[cnum] = value;
  2564. /*
  2565. * write hardware register if we can
  2566. */
  2567. if (can_access_pmu) {
  2568. ia64_set_pmc(cnum, value);
  2569. }
  2570. #ifdef CONFIG_SMP
  2571. else {
  2572. /*
  2573. * per-task SMP only here
  2574. *
  2575. * we are guaranteed that the task is not running on the other CPU,
  2576. * we indicate that this PMD will need to be reloaded if the task
  2577. * is rescheduled on the CPU it ran last on.
  2578. */
  2579. ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
  2580. }
  2581. #endif
  2582. }
  2583. DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
  2584. cnum,
  2585. value,
  2586. is_loaded,
  2587. can_access_pmu,
  2588. flags,
  2589. ctx->ctx_all_pmcs[0],
  2590. ctx->ctx_used_pmds[0],
  2591. ctx->ctx_pmds[cnum].eventid,
  2592. smpl_pmds,
  2593. reset_pmds,
  2594. ctx->ctx_reload_pmcs[0],
  2595. ctx->ctx_used_monitors[0],
  2596. ctx->ctx_ovfl_regs[0]));
  2597. }
  2598. /*
  2599. * make sure the changes are visible
  2600. */
  2601. if (can_access_pmu) ia64_srlz_d();
  2602. return 0;
  2603. error:
  2604. PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
  2605. return ret;
  2606. }
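
/*
 * Illustrative user-level call sequence reaching this function (a sketch,
 * not part of this file; the choice of PMC4 and the event encoding are
 * hypothetical):
 *
 *	pfarg_reg_t pc;
 *
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;			   // a counting monitor PMC
 *	pc.reg_value = ...;			   // event select, plm, etc.
 *	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;	   // notify on overflow of PMD4
 *	pc.reg_reset_pmds[0] = 1UL << 5;	   // also reset PMD5 on that overflow
 *	perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1);
 */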
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;

			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;

			value = v;
			ret   = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64bits) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when the context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value &  ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) ctx->th_pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			cnum,
			value,
			is_loaded,
			can_access_pmu,
			hw_value,
			ctx->ctx_pmds[cnum].val,
			ctx->ctx_pmds[cnum].short_reset,
			ctx->ctx_pmds[cnum].long_reset,
			PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			ctx->ctx_pmds[cnum].seed,
			ctx->ctx_pmds[cnum].mask,
			ctx->ctx_used_pmds[0],
			ctx->ctx_pmds[cnum].reset_pmds[0],
			ctx->ctx_reload_pmds[0],
			ctx->ctx_all_pmds[0],
			ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
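
/*
 * Illustrative user-level companion to the PMC write above (a sketch;
 * the 200000-event sampling period is hypothetical):
 *
 *	pfarg_reg_t pd;
 *
 *	memset(&pd, 0, sizeof(pd));
 *	pd.reg_num         = 4;
 *	pd.reg_value       = -200000UL;	// counters count up toward overflow
 *	pd.reg_long_reset  = -200000UL;	// used on user-requested restart
 *	pd.reg_short_reset = -200000UL;	// used on in-kernel resets
 *	perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1);
 */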
/*
 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
 * guaranteed to return consistent data to the user; it may simply be old. It is not
 * trivial to treat the overflow while inside the call because you may end up in
 * some module sampling buffer code causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */
	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true when not self-monitoring only in UP
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */
	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval        = ctx->ctx_pmds[cnum].val;
		lval        = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live register due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu){
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}

		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
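
/*
 * Worked example of the counter reconstruction above (illustrative,
 * assuming a 47-bit wide hardware counter, i.e. ovfl_mask == 2^47 - 1):
 * the software part ctx_pmds[].val accumulates a full 2^47 chunk at each
 * hardware overflow, while the live PMD supplies only the low 47 bits,
 * so the value returned to the user is
 *
 *	val = (hw_pmd & ovfl_mask) + ctx_pmds[cnum].val;
 *
 * which behaves as a monotonically increasing 64-bit virtual counter.
 */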
int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);

int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
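
/*
 * Illustrative in-kernel use of the exported helpers (a sketch; typically
 * called from a sampling-format module's overflow handler, where current
 * owns the PMU; PMD4 is a hypothetical counting register):
 *
 *	pfarg_reg_t rd;
 *
 *	memset(&rd, 0, sizeof(rd));
 *	rd.reg_num = 4;
 *	if (pfm_mod_read_pmds(current, &rd, 1, regs) == 0)
 *		... use rd.reg_value (full 64-bit virtualized count) ...
 */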
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task_pid_nr(task)));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task_pid_nr(task), ret));

	UNLOCK_PFS(flags);

	return ret;
}

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}

static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
		case PFM_CTX_MASKED:
			break;
		case PFM_CTX_LOADED:
			if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
			/* fall through */
		case PFM_CTX_UNLOADED:
		case PFM_CTX_ZOMBIE:
			DPRINT(("invalid state=%d\n", state));
			return -EBUSY;
		default:
			DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
			return -EINVAL;
	}

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task_pid_nr(task),
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));

				// cannot use pfm_stop_monitoring(task, regs);
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restart before this one is
		 * seen by other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
		complete(&ctx->ctx_restart_done);
	} else {
		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		pfm_set_task_notify(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
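
/*
 * Illustrative user-level restart (a sketch): after consuming an overflow
 * notification message, the monitoring tool re-arms the session with
 *
 *	perfmonctl(fd, PFM_RESTART, NULL, 0);
 *
 * which lands here, applies the long reset values to the overflowed PMDs
 * and, if the context was MASKED, resumes monitoring (or unblocks the
 * monitored task when blocking notification mode is in use).
 */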
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}

/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */

	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourself as user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));

			goto abort_mission;
		}

		/*
		 * make sure we do not install enabled breakpoints
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMC, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers are centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}

static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}

int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);

int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);
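
/*
 * Illustrative use of the range-restriction interface above (a sketch;
 * the register pair 0/1 and the address are hypothetical). Odd-numbered
 * registers carry the control bits, which pfm_write_ibr_dbr() forcibly
 * clears (ibr_x, dbr_r/dbr_w), so no live breakpoint can be installed
 * through this path:
 *
 *	pfarg_dbreg_t d[2];
 *
 *	memset(d, 0, sizeof(d));
 *	d[0].dbreg_num   = 0;		// address part of the pair
 *	d[0].dbreg_value = code_addr;
 *	d[1].dbreg_num   = 1;		// mask/control part of the pair
 *	d[1].dbreg_value = ...;
 *	perfmonctl(fd, PFM_WRITE_IBRS, d, 2);
 */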
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}

static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		task_pid_nr(PFM_CTX_TASK(ctx)),
		state,
		is_system));
	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task mode
	 */

	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task_pid_nr(task)));
	}
	return 0;
}

static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-process mode
	 */

	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = task_pt_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}

static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			break;
		}
	} while_each_thread (g, t);

	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}

static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;
	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n",
							task_pid_nr(task)));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	ret = -EBUSY;
	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = ctx->th_pmcs;
	pmds_source = ctx->th_pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMD from ctx to PMU (as opposed to thread state)
		 * restore all PMC from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = task_pt_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task, there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task  = NULL;
			}
		}
	}
	return ret;
}

/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread()
 * which also grabs the context lock and would therefore be blocked
 * until we are here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);

static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : task_pt_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));

	return 0;
}

/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = task_pt_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));

	state = ctx->ctx_state;
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * only comes to this function if pfm_context is not NULL, i.e., cannot
			 * be in unloaded state
			 */
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
			break;
		case PFM_CTX_LOADED:
		case PFM_CTX_MASKED:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
			}
			DPRINT(("ctx unloaded for current state was %d\n", state));

			pfm_end_notify_user(ctx);
			break;
		case PFM_CTX_ZOMBIE:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
			}
			free_ok = 1;
			break;
		default:
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
			break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}

/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
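
/*
 * Illustrative mapping from a user call to the table above (a sketch):
 * perfmonctl(fd, PFM_WRITE_PMCS, reqs, n) selects entry 1, so
 * sys_perfmonctl() below copies n * sizeof(pfarg_reg_t) of arguments,
 * applies the PFM_CMD_PCLRWS checks (valid fd, writable arg, monitored
 * task stopped where required) and then invokes pfm_write_pmcs().
 */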
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task_pid_nr(task),
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {

		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
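
/*
 * Example of the recheck window above (illustrative, not in the source):
 *
 *	CPU0 (perfmonctl caller)	CPU1 (monitored task)
 *	UNPROTECT_CTX(ctx)
 *	wait_task_inactive(task)	schedules out, pfm_save_regs()
 *					grabs/releases the ctx lock
 *	PROTECT_CTX(ctx)
 *	ctx_state changed? -> goto recheck
 *
 * The context cannot vanish during the window because the caller still
 * holds a VFS reference on the perfmon file descriptor.
 */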
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}

	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = (pfm_context_t *)file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, task_pt_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (file)
		fput(file);

	kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
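
/*
 * Usage sketch from user level (illustrative only; context creation via
 * PFM_CREATE_CONTEXT and the exact pfarg_reg_t layout are defined in
 * <asm/perfmon.h>):
 *
 *	pfarg_reg_t pc;
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;		// a counting PMC on many PMUs
 *	pc.reg_value = ...;		// event encoding, privilege mask, etc.
 *	if (perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1) < 0)
 *		perror("PFM_WRITE_PMCS");
 *
 * count=1 matches cmd_narg for fixed-argument commands; commands declared
 * with PFM_CMD_ARG_MANY accept an array and any count > 0.
 */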
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", task_pid_nr(current)));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
 * call may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (PUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = task_pt_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because the wait below may sleep
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we get here
	 */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered interrupts mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values; ovfl_regs is then
	 * reset for these new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case the wait was interrupted we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}

static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
		     pmc0,
		     task ? task_pid_nr(task): -1,
		     (regs ? regs->cr_iip : 0),
		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		     ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			     i,
			     new_val,
			     old_val,
			     ia64_get_pmd(i) & ovfl_val,
			     ovfl_pmds,
			     ovfl_notify));
	}
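
	/*
	 * Worked example of the virtualization above (illustrative numbers):
	 * with 47-bit hardware counters, ovfl_val = 2^47 - 1. A user arming
	 * pmd4 for notification after 1000 events writes val = -1000, i.e.
	 * 0xfffffffffffffc18. Each hardware overflow adds 1 + ovfl_val = 2^47
	 * to the 64-bit software value; only when that 64-bit value itself
	 * wraps (old_val > new_val) is the pmd recorded in ovfl_pmds and,
	 * if PMC_OVFL_NOTIFY() is set, in ovfl_notify.
	 */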
	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;

	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char)i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer until state is changed (shorten spin window). the context is locked
		 * anyway, so the signal receiver would come spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
			smp_processor_id(),
			task ? task_pid_nr(task) : -1,
			pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by virtue of pfm_save_regs(), this one will disappear. If a system wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(irq, arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu_no_resched();
	return IRQ_HANDLED;
}
/*
 * /proc/perfmon interface, for debug only
 */
#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= NR_CPUS) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version           : %u.%u\n"
		"model                     : %s\n"
		"fastctxsw                 : %s\n"
		"expert mode               : %s\n"
		"ovfl_mask                 : 0x%lx\n"
		"PMU flags                 : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions             : %u\n"
		"sys_sessions              : %u\n"
		"sys_use_dbregs            : %u\n"
		"ptrace_use_dbregs         : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}
static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs      : %lu\n"
		"CPU%-2d overflow cycles     : %lu\n"
		"CPU%-2d overflow min        : %lu\n"
		"CPU%-2d overflow max        : %lu\n"
		"CPU%-2d smpl handler calls  : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs      : %lu\n"
		"CPU%-2d replay intrs        : %lu\n"
		"CPU%-2d syst_wide           : %d\n"
		"CPU%-2d dcr_pp              : %d\n"
		"CPU%-2d exclude idle        : %d\n"
		"CPU%-2d owner               : %d\n"
		"CPU%-2d context             : %p\n"
		"CPU%-2d activations         : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr                 : 0x%lx\n"
			"CPU%-2d pmc0                : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u                : 0x%lx\n"
				"CPU%-2d pmd%u                : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}
struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next  = pfm_proc_next,
	.stop  = pfm_proc_stop,
	.show  = pfm_proc_show
};
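
/*
 * Sketch of how the seq_file core drives these callbacks on a read of
 * /proc/perfmon (illustrative only):
 *
 *	v = start(m, &pos);		// pos 0 -> PFM_PROC_SHOW_HEADER
 *	while (v) {
 *		show(m, v);		// header, then one online CPU per step
 *		v = next(m, v, &pos);	// advances pos, skipping offline CPUs
 *	}
 *	stop(m, v);
 *
 * The iterator value is pos+1 cast to a pointer, hence "cpu = (long)v - 1"
 * in pfm_proc_show().
 */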
static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
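
/*
 * Timeline sketch of the idle-exclusion path above (illustrative): with a
 * system-wide session using PFM_CPUINFO_EXCL_IDLE on one CPU,
 *
 *	task A switched in  -> psr.pp follows dcr.pp (counting)
 *	idle switched in    -> dcr.pp and psr.pp cleared (counting paused)
 *	idle switched out   -> dcr.pp and psr.pp set again (counting resumes)
 *
 * so cycles spent in the idle loop never reach the counters.
 */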
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
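
/*
 * Example of the activation check above (illustrative): each pfm_load_regs()
 * bumps a per-CPU activation counter and records it in the context. If task T
 * last ran on CPU2 at activation 41 and nobody else touched that PMU since
 * (GET_ACTIVATION() still returns 41 on CPU2), T's PMU state is still live in
 * hardware and only the partial reload masks are applied; otherwise the full
 * ctx_all_pmds/ctx_all_pmcs sets are reloaded.
 */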
#else /* !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. to interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff!
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * can access PMU if task is the owner of the PMU state on the current CPU
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (that is not necessarily the task the context is attached to in this mode).
	 * In system-wide we always have can_access_pmu true because a task running on an
	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());

	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight
		 * This also guarantees that pmc0 will contain the final state
		 * It virtually gives us full control over overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip unused pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
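
/*
 * Worked example of the rebuild above (illustrative numbers): with
 * ovfl_val = 2^47 - 1, a software value ctx_pmds[i].val = 0x1800000000000
 * and a hardware pmd reading 0x1234, the flushed 64-bit count is
 * 0x1800000000000 + 0x1234. If pmc0 additionally flags an in-flight
 * overflow for that pmd, another 1 + ovfl_val = 2^47 is folded in so the
 * not-yet-serviced hardware wrap is not lost.
 */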
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = IRQF_DISABLED,
	.name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}
void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
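
/*
 * Usage sketch for the alternate-handler interface (illustrative; a client
 * module would do something like this):
 *
 *	static pfm_intr_handler_desc_t my_desc = { .handler = my_pmu_intr };
 *
 *	if (pfm_install_alt_pmu_interrupt(&my_desc) == 0) {
 *		... PMU interrupts are now routed to my_pmu_intr() ...
 *		pfm_remove_alt_pmu_interrupt(&my_desc);
 *	}
 *
 * Install reserves a system-wide session on every online CPU, so it fails
 * with -EBUSY while regular perfmon sessions exist (and vice versa).
 */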
/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static const struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
		       local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
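	/*
	 * impl_pmcs/impl_pmds below are arrays of 64-bit masks: register i
	 * lives in word i>>6 at bit position i&63. For example, PMD 45 sets
	 * bit 45 of impl_pmds[0], while PMD 70 would set bit 6 of impl_pmds[1].
	 */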
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;
	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}
	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));
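	/*
	 * ffz(ovfl_val) is the index of the first zero bit in the overflow
	 * mask, i.e. the implemented counter width: with 47-bit counters,
	 * ovfl_val is (1UL << 47) - 1 and ffz() yields 47.
	 */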
	/* sanity check */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * install customized file operations for /proc/perfmon entry
	 */
	perfmon_dir->proc_fops = &pfm_proc_fops;

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();
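	/*
	 * seed each CPU's running minimum with the largest possible value so
	 * that the first measured overflow-interrupt latency replaces it
	 */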
	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}
__initcall(pfm_init);

/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time=1;
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}
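	/*
	 * point this CPU's PMU interrupt (cr.pmv) at our vector; the data
	 * serialize below ensures the control-register write has taken
	 * effect before we return
	 */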
	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	/* ctx may be NULL when there is no PMU owner on this CPU */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else  /* !CONFIG_PERFMON */

asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}

#endif /* CONFIG_PERFMON */