cik.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"

MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
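
/*
 * Descriptive note on the firmware images above: names follow
 * <CHIP>_<block>.bin, where pfp (pre-fetch parser), me (micro engine)
 * and ce (constant engine) belong to the graphics command processor,
 * mec is the compute micro engine, rlc the run list controller, sdma
 * the system DMA engine, mc the memory controller and smc the system
 * management controller.  The APUs (KAVERI, KABINI) list no mc/smc
 * images, as those blocks are not programmed this way on parts
 * without dedicated VRAM.
 */
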
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
				 struct radeon_ib *ib,
				 uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);

/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}
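
/*
 * Note on the conversion above: CTF_TEMP is a 9-bit field.  A raw
 * reading with bit 0x200 set is treated as the 255 C maximum;
 * otherwise the low nine bits are degrees C, scaled by 1000 for the
 * millidegree hwmon convention.
 */
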
/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}
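
/*
 * Worked example for the KV/KB conversion above: a raw SMC reading of
 * 0x200 (512) gives (512 / 8) - 49 = 15 degrees C, reported to hwmon
 * as 15000 millidegrees.
 */
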
/*
 * Indirect registers accessor
 */
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
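
/*
 * A typical read-modify-write through the accessors above looks like
 * the sketch below (illustrative only; callers normally go through the
 * RREG32_PCIE_PORT()/WREG32_PCIE_PORT() wrappers in radeon.h, and
 * SOME_PCIE_BIT is a placeholder, not a real register field):
 *
 *	u32 tmp;
 *
 *	tmp = cik_pciep_rreg(rdev, PCIE_LC_SPEED_CNTL);
 *	tmp |= SOME_PCIE_BIT;
 *	cik_pciep_wreg(rdev, PCIE_LC_SPEED_CNTL, tmp);
 *
 * pciep_idx_lock serializes the INDEX/DATA pair so concurrent callers
 * cannot interleave their accesses; the dummy reads of PCIE_INDEX and
 * PCIE_DATA act as read-backs that flush each posted write.
 */
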
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
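
/*
 * Both the spectre list above and the kalindi list below use the same
 * packed layout, consumed by the RLC save/restore setup: each entry is
 * a selector/offset word, (sel << 16) | (reg >> 2), with the register
 * offset expressed in dwords, usually followed by a 0x00000000
 * placeholder for the saved value.  The 0x0e00 selector appears to be
 * the broadcast encoding while 0x4e00-0xbe00 address individual
 * shader-engine instances, and the bare constants 0x3 and 0x5 look
 * like group markers for the entries that follow.  This reading is
 * inferred from the data itself, not from documentation.
 */
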
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
  899. (0x0e00 << 16) | (0x9600 >> 2),
  900. };
static const u32 bonaire_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 bonaire_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x3350, 0x000c0fc0, 0x00040200,
	0x9a10, 0x00010000, 0x00058208,
	0x3c000, 0xffff1fff, 0x00140000,
	0x3c200, 0xfdfc0fff, 0x00000100,
	0x3c234, 0x40000000, 0x40000200,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x220c, 0x00007fb6, 0x0021a1b1,
	0x2210, 0x00007fb6, 0x002021b1,
	0x2180, 0x00007fb6, 0x00002191,
	0x2218, 0x00007fb6, 0x002121b1,
	0x221c, 0x00007fb6, 0x002021b1,
	0x21dc, 0x00007fb6, 0x00002191,
	0x21e0, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000003f, 0x00000007,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0x9100, 0x03000000, 0x0362c688,
	0x8c00, 0x000000ff, 0x00000001,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac0c, 0xffffffff, 0x00001032
};

static const u32 bonaire_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0xc0000100,
	0x3c2c8, 0xffffffff, 0xc0000100,
	0x3c2c4, 0xffffffff, 0xc0000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 spectre_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 spectre_golden_registers[] =
{
	0x3c000, 0xffff1fff, 0x96940200,
	0x3c00c, 0xffff0001, 0xff000000,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffc, 0x00020200,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x2f48, 0x73773777, 0x12010001,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x28354, 0x0000003f, 0x00000000,
	0x3e78, 0x00000001, 0x00000002,
	0x913c, 0xffff03df, 0x00000004,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000008ff, 0x00000800,
	0x9508, 0x00010000, 0x00010000,
	0xac0c, 0xffffffff, 0x54763210,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x30934, 0xffffffff, 0x00000001
};

static const u32 spectre_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 kalindi_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 kalindi_golden_registers[] =
{
	0x3c000, 0xffffdfff, 0x6e944040,
	0x55e4, 0xff607fff, 0xfc000100,
	0x3c220, 0xff000fff, 0x00000100,
	0x3c224, 0xff000fff, 0x00000100,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ffcfff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000000ff, 0x00000003,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};

static const u32 kalindi_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
static void cik_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	default:
		break;
	}
}
/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}
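
/*
 * Illustrative example (assuming the usual ATOM convention that
 * reference_freq is stored in 10 kHz units): with a 100 MHz crystal,
 * reference_freq is 10000, so a dGPU with XTALIN_DIVIDE set returns
 * 2500 from cik_get_xclk(), i.e. 25 MHz.
 */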
/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 *
 * Returns the value in the doorbell aperture at the
 * requested offset (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
{
	if (offset < rdev->doorbell.size) {
		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
		return 0;
	}
}
/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested offset (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
{
	if (offset < rdev->doorbell.size) {
		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
	}
}
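
/*
 * Usage sketch: ring code normally goes through the RDOORBELL32()/
 * WDOORBELL32() wrappers from radeon.h rather than calling these helpers
 * directly (the field names below assume the radeon_ring doorbell
 * members), e.g. to kick an SDMA queue:
 *
 *	WDOORBELL32(ring->doorbell_offset, ring->wptr);
 */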
#define BONAIRE_IO_MC_REGS_SIZE 36

static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};
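
/*
 * Each row above is an {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA}
 * pair; ci_mc_load_microcode() below writes the first word to the index
 * register and the second to the data register before streaming in the
 * MC ucode proper.
 */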
/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}
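
/*
 * Illustrative usage pattern: select an instance, program the instanced
 * registers, then restore the default selection so later accesses hit
 * instance 0 again:
 *
 *	cik_srbm_select(rdev, me, pipe, queue, 0);
 *	... program per-queue/per-vmid registers ...
 *	cik_srbm_select(rdev, 0, 0, 0, 0);
 */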
/* ucode loading */
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
	default:
		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
		ucode_size = CIK_MC_UCODE_SIZE;
		regs_size = BONAIRE_IO_MC_REGS_SIZE;
		break;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	/* only load the ucode when the MC engine is not already running */
	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}

		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}
/**
 * cik_init_microcode - load ucode images from disk
 *
 * @rdev: radeon_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size,
	       mec_req_size, rlc_req_size, mc_req_size,
	       sdma_req_size, smc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BONAIRE:
		chip_name = "BONAIRE";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = CIK_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_KAVERI:
		chip_name = "KAVERI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KV_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	case CHIP_KABINI:
		chip_name = "KABINI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	default:
		BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->ce_fw->size != ce_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->ce_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->mec_fw->size != mec_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mec_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
	err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->sdma_fw->size != sdma_req_size) {
		printk(KERN_ERR
		       "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
		       rdev->sdma_fw->size, fw_name);
		err = -EINVAL;
	}

	/* No SMC, MC ucode on APUs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "cik_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
		} else if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "cik_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "cik_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->ce_fw);
		rdev->ce_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}
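
/*
 * The names built above are resolved by request_firmware() through the
 * kernel firmware loader, so e.g. "radeon/BONAIRE_pfp.bin" is typically
 * installed as /lib/firmware/radeon/BONAIRE_pfp.bin.
 */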
  1662. /*
  1663. * Core functions
  1664. */
  1665. /**
  1666. * cik_tiling_mode_table_init - init the hw tiling table
  1667. *
  1668. * @rdev: radeon_device pointer
  1669. *
  1670. * Starting with SI, the tiling setup is done globally in a
  1671. * set of 32 tiling modes. Rather than selecting each set of
  1672. * parameters per surface as on older asics, we just select
  1673. * which index in the tiling table we want to use, and the
  1674. * surface uses those parameters (CIK).
  1675. */
  1676. static void cik_tiling_mode_table_init(struct radeon_device *rdev)
  1677. {
  1678. const u32 num_tile_mode_states = 32;
  1679. const u32 num_secondary_tile_mode_states = 16;
  1680. u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
  1681. u32 num_pipe_configs;
  1682. u32 num_rbs = rdev->config.cik.max_backends_per_se *
  1683. rdev->config.cik.max_shader_engines;
  1684. switch (rdev->config.cik.mem_row_size_in_kb) {
  1685. case 1:
  1686. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
  1687. break;
  1688. case 2:
  1689. default:
  1690. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
  1691. break;
  1692. case 4:
  1693. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
  1694. break;
  1695. }
  1696. num_pipe_configs = rdev->config.cik.max_tile_pipes;
  1697. if (num_pipe_configs > 8)
  1698. num_pipe_configs = 8; /* ??? */
  1699. if (num_pipe_configs == 8) {
  1700. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  1701. switch (reg_offset) {
  1702. case 0:
  1703. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1704. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1705. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1706. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  1707. break;
  1708. case 1:
  1709. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1710. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1711. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1712. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  1713. break;
  1714. case 2:
  1715. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1716. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1717. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1718. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1719. break;
  1720. case 3:
  1721. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1722. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1723. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1724. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  1725. break;
  1726. case 4:
  1727. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1728. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1729. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1730. TILE_SPLIT(split_equal_to_row_size));
  1731. break;
  1732. case 5:
  1733. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1734. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1735. break;
  1736. case 6:
  1737. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1738. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1739. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1740. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1741. break;
  1742. case 7:
  1743. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1744. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1745. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1746. TILE_SPLIT(split_equal_to_row_size));
  1747. break;
  1748. case 8:
  1749. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1750. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
  1751. break;
  1752. case 9:
  1753. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1754. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1755. break;
  1756. case 10:
  1757. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1758. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1759. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1760. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1761. break;
  1762. case 11:
  1763. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1764. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1765. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  1766. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1767. break;
  1768. case 12:
  1769. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1770. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1771. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1772. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1773. break;
  1774. case 13:
  1775. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1776. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1777. break;
  1778. case 14:
  1779. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1780. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1781. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1782. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1783. break;
  1784. case 16:
  1785. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1786. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1787. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  1788. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1789. break;
  1790. case 17:
  1791. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1792. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1793. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1794. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1795. break;
  1796. case 27:
  1797. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1798. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  1799. break;
  1800. case 28:
  1801. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1802. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1803. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1804. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1805. break;
  1806. case 29:
  1807. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1808. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1809. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  1810. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1811. break;
  1812. case 30:
  1813. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1814. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1815. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  1816. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1817. break;
  1818. default:
  1819. gb_tile_moden = 0;
  1820. break;
  1821. }
  1822. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  1823. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  1824. }
  1825. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  1826. switch (reg_offset) {
  1827. case 0:
  1828. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1829. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1830. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1831. NUM_BANKS(ADDR_SURF_16_BANK));
  1832. break;
  1833. case 1:
  1834. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1835. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1836. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1837. NUM_BANKS(ADDR_SURF_16_BANK));
  1838. break;
  1839. case 2:
  1840. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1841. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1842. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1843. NUM_BANKS(ADDR_SURF_16_BANK));
  1844. break;
  1845. case 3:
  1846. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1847. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1848. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1849. NUM_BANKS(ADDR_SURF_16_BANK));
  1850. break;
  1851. case 4:
  1852. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1853. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1854. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1855. NUM_BANKS(ADDR_SURF_8_BANK));
  1856. break;
  1857. case 5:
  1858. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1859. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1860. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1861. NUM_BANKS(ADDR_SURF_4_BANK));
  1862. break;
  1863. case 6:
  1864. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1865. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1866. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1867. NUM_BANKS(ADDR_SURF_2_BANK));
  1868. break;
  1869. case 8:
  1870. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1871. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  1872. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1873. NUM_BANKS(ADDR_SURF_16_BANK));
  1874. break;
  1875. case 9:
  1876. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1877. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1878. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1879. NUM_BANKS(ADDR_SURF_16_BANK));
  1880. break;
  1881. case 10:
  1882. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1883. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1884. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1885. NUM_BANKS(ADDR_SURF_16_BANK));
  1886. break;
  1887. case 11:
  1888. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1889. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1890. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1891. NUM_BANKS(ADDR_SURF_16_BANK));
  1892. break;
  1893. case 12:
  1894. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1895. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1896. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1897. NUM_BANKS(ADDR_SURF_8_BANK));
  1898. break;
  1899. case 13:
  1900. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1901. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1902. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1903. NUM_BANKS(ADDR_SURF_4_BANK));
  1904. break;
  1905. case 14:
  1906. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1907. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1908. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1909. NUM_BANKS(ADDR_SURF_2_BANK));
  1910. break;
  1911. default:
  1912. gb_tile_moden = 0;
  1913. break;
  1914. }
  1915. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  1916. }
  1917. } else if (num_pipe_configs == 4) {
  1918. if (num_rbs == 4) {
  1919. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  1920. switch (reg_offset) {
  1921. case 0:
  1922. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1923. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1924. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1925. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  1926. break;
  1927. case 1:
  1928. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1929. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1930. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1931. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  1932. break;
  1933. case 2:
  1934. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1935. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1936. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1937. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1938. break;
  1939. case 3:
  1940. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1941. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1942. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1943. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  1944. break;
  1945. case 4:
  1946. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1947. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1948. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1949. TILE_SPLIT(split_equal_to_row_size));
  1950. break;
  1951. case 5:
  1952. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1953. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1954. break;
  1955. case 6:
  1956. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1957. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1958. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1959. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1960. break;
  1961. case 7:
  1962. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1963. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1964. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1965. TILE_SPLIT(split_equal_to_row_size));
  1966. break;
  1967. case 8:
  1968. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1969. PIPE_CONFIG(ADDR_SURF_P4_16x16));
  1970. break;
  1971. case 9:
  1972. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1973. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1974. break;
  1975. case 10:
  1976. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1977. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1978. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1979. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1980. break;
  1981. case 11:
  1982. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1983. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1984. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  1985. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1986. break;
  1987. case 12:
  1988. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1989. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1990. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1991. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1992. break;
  1993. case 13:
  1994. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1995. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1996. break;
  1997. case 14:
  1998. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1999. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2000. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2001. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2002. break;
  2003. case 16:
  2004. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2005. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2006. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2007. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2008. break;
  2009. case 17:
  2010. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2011. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2012. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2013. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2014. break;
  2015. case 27:
  2016. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2017. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2018. break;
  2019. case 28:
  2020. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2021. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2022. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2023. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2024. break;
  2025. case 29:
  2026. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2027. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2028. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2029. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2030. break;
  2031. case 30:
  2032. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2033. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2034. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2035. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2036. break;
  2037. default:
  2038. gb_tile_moden = 0;
  2039. break;
  2040. }
  2041. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2042. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2043. }
  2044. } else if (num_rbs < 4) {
  2045. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  2046. switch (reg_offset) {
  2047. case 0:
  2048. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2049. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2050. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2051. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2052. break;
  2053. case 1:
  2054. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2055. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2056. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2057. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2058. break;
  2059. case 2:
  2060. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2061. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2062. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2063. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2064. break;
  2065. case 3:
  2066. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2067. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2068. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2069. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2070. break;
  2071. case 4:
  2072. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2073. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2074. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2075. TILE_SPLIT(split_equal_to_row_size));
  2076. break;
  2077. case 5:
  2078. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2079. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2080. break;
  2081. case 6:
  2082. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2083. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2084. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2085. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2086. break;
  2087. case 7:
  2088. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2089. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2090. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2091. TILE_SPLIT(split_equal_to_row_size));
  2092. break;
  2093. case 8:
  2094. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2095. PIPE_CONFIG(ADDR_SURF_P4_8x16));
  2096. break;
  2097. case 9:
  2098. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2099. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2100. break;
  2101. case 10:
  2102. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2103. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2104. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2105. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2106. break;
  2107. case 11:
  2108. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2109. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2110. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2111. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2112. break;
  2113. case 12:
  2114. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2115. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2116. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2117. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2118. break;
  2119. case 13:
  2120. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2121. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2122. break;
  2123. case 14:
  2124. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2125. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2126. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2127. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2128. break;
  2129. case 16:
  2130. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2131. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2132. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2133. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2134. break;
  2135. case 17:
  2136. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2137. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2138. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2139. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2140. break;
  2141. case 27:
  2142. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2143. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2144. break;
  2145. case 28:
  2146. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2147. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2148. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2149. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2150. break;
  2151. case 29:
  2152. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2153. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2154. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2155. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2156. break;
  2157. case 30:
  2158. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2159. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2160. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2161. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2162. break;
  2163. default:
  2164. gb_tile_moden = 0;
  2165. break;
  2166. }
  2167. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2168. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2169. }
  2170. }
  2171. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  2172. switch (reg_offset) {
  2173. case 0:
  2174. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2175. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2176. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2177. NUM_BANKS(ADDR_SURF_16_BANK));
  2178. break;
  2179. case 1:
  2180. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2181. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2182. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2183. NUM_BANKS(ADDR_SURF_16_BANK));
  2184. break;
  2185. case 2:
  2186. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2187. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2188. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2189. NUM_BANKS(ADDR_SURF_16_BANK));
  2190. break;
  2191. case 3:
  2192. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2193. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2194. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2195. NUM_BANKS(ADDR_SURF_16_BANK));
  2196. break;
  2197. case 4:
  2198. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2199. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2200. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2201. NUM_BANKS(ADDR_SURF_16_BANK));
  2202. break;
  2203. case 5:
  2204. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2205. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2206. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2207. NUM_BANKS(ADDR_SURF_8_BANK));
  2208. break;
  2209. case 6:
  2210. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2211. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2212. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2213. NUM_BANKS(ADDR_SURF_4_BANK));
  2214. break;
  2215. case 8:
  2216. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2217. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2218. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2219. NUM_BANKS(ADDR_SURF_16_BANK));
  2220. break;
  2221. case 9:
  2222. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2223. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2224. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2225. NUM_BANKS(ADDR_SURF_16_BANK));
  2226. break;
  2227. case 10:
  2228. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2229. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2230. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2231. NUM_BANKS(ADDR_SURF_16_BANK));
  2232. break;
  2233. case 11:
  2234. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2235. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2236. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2237. NUM_BANKS(ADDR_SURF_16_BANK));
  2238. break;
  2239. case 12:
  2240. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2241. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2242. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2243. NUM_BANKS(ADDR_SURF_16_BANK));
  2244. break;
  2245. case 13:
  2246. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2247. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2248. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2249. NUM_BANKS(ADDR_SURF_8_BANK));
  2250. break;
  2251. case 14:
  2252. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2253. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2254. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2255. NUM_BANKS(ADDR_SURF_4_BANK));
  2256. break;
  2257. default:
  2258. gb_tile_moden = 0;
  2259. break;
  2260. }
  2261. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2262. }
	} else if (num_pipe_configs == 2) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else
		DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
}
/**
 * cik_select_se_sh - select which SE, SH to address
 *
 * @rdev: radeon_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH.  0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
static void cik_select_se_sh(struct radeon_device *rdev,
			     u32 se_num, u32 sh_num)
{
	u32 data = INSTANCE_BROADCAST_WRITES;

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
	else if (se_num == 0xffffffff)
		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
	else if (sh_num == 0xffffffff)
		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
	else
		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
	WREG32(GRBM_GFX_INDEX, data);
}
/**
 * cik_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * create a variable length bit mask (CIK).
 * Returns the bitmask.
 */
static u32 cik_create_bitmask(u32 bit_width)
{
	u32 i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}
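
/* e.g. cik_create_bitmask(4) returns 0xf; the loop is equivalent to
 * ((1 << bit_width) - 1) for the small widths used below.
 */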
/**
 * cik_get_rb_disabled - compute the disabled RB bitmask
 *
 * @rdev: radeon_device pointer
 * @max_rb_num: max RBs (render backends) for the asic
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
 */
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
			       u32 max_rb_num, u32 se_num,
			       u32 sh_per_se)
{
	u32 data, mask;

	data = RREG32(CC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;
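	/* the per-SH mask is max_rb_num / se_num / sh_per_se bits wide; the
	 * fused-off (CC) and user-disabled (GC_USER) bits were OR'd above */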
	mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);

	return data & mask;
}
/**
 * cik_setup_rb - setup the RBs on the asic
 *
 * @rdev: radeon_device pointer
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 * @max_rb_num: max RBs (render backends) for the asic
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void cik_setup_rb(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	for (i = 0; i < se_num; i++) {
		cik_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
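			/* two bits of enabled_rbs are consumed per SH to pick
			 * a RASTER_CONFIG RB mapping below */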
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
/**
 * cik_gpu_init - setup the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void cik_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_BONAIRE:
		rdev->config.cik.max_shader_engines = 2;
		rdev->config.cik.max_tile_pipes = 4;
		rdev->config.cik.max_cu_per_sh = 7;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 2;
		rdev->config.cik.max_texture_channel_caches = 4;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 32;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		rdev->config.cik.max_shader_engines = 1;
		rdev->config.cik.max_tile_pipes = 4;
		if ((rdev->pdev->device == 0x1304) ||
		    (rdev->pdev->device == 0x1305) ||
		    (rdev->pdev->device == 0x130C) ||
		    (rdev->pdev->device == 0x130F) ||
		    (rdev->pdev->device == 0x1310) ||
		    (rdev->pdev->device == 0x1311) ||
		    (rdev->pdev->device == 0x131C)) {
			rdev->config.cik.max_cu_per_sh = 8;
			rdev->config.cik.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x1309) ||
			   (rdev->pdev->device == 0x130A) ||
			   (rdev->pdev->device == 0x130D) ||
			   (rdev->pdev->device == 0x1313) ||
			   (rdev->pdev->device == 0x131D)) {
			rdev->config.cik.max_cu_per_sh = 6;
			rdev->config.cik.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x1306) ||
			   (rdev->pdev->device == 0x1307) ||
			   (rdev->pdev->device == 0x130B) ||
			   (rdev->pdev->device == 0x130E) ||
			   (rdev->pdev->device == 0x1315) ||
			   (rdev->pdev->device == 0x131B)) {
			rdev->config.cik.max_cu_per_sh = 4;
			rdev->config.cik.max_backends_per_se = 1;
		} else {
			rdev->config.cik.max_cu_per_sh = 3;
			rdev->config.cik.max_backends_per_se = 1;
		}
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_texture_channel_caches = 4;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 16;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	default:
		rdev->config.cik.max_shader_engines = 1;
		rdev->config.cik.max_tile_pipes = 2;
		rdev->config.cik.max_cu_per_sh = 2;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 1;
		rdev->config.cik.max_texture_channel_caches = 2;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 16;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
	rdev->config.cik.mem_max_burst_length_bytes = 256;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cik.mem_row_size_in_kb > 4)
		rdev->config.cik.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cik.shader_engine_tile_size = 32;
	rdev->config.cik.num_gpus = 1;
	rdev->config.cik.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cik.tile_config = 0;
	switch (rdev->config.cik.num_tile_pipes) {
	case 1:
		rdev->config.cik.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cik.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cik.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.cik.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cik.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cik.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cik.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
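	/* e.g. a 4-pipe asic stores 2 in bits 3:0; the bank, group-size and
	 * row-size nibbles are raw register fields read back above, so the
	 * packed value depends on the board's memory configuration */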

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	cik_tiling_mode_table_init(rdev);

	cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
		     rdev->config.cik.max_sh_per_se,
		     rdev->config.cik.max_backends_per_se);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	WREG32(SX_DEBUG_1, 0x20);

	WREG32(TA_CNTL_AUX, 0x00010000);

	tmp = RREG32(SPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(SPI_CONFIG_CNTL, tmp);

	WREG32(SQ_CONFIG, 1);

	WREG32(DB_DEBUG, 0);

	tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(DB_DEBUG2, tmp);

	tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(DB_DEBUG3, tmp);

	tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(CB_HW_CONTROL, tmp);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
	WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);

	udelay(50);
}
/*
 * GPU scratch registers helper functions.
 */
/**
 * cik_scratch_init - setup driver info for CP scratch regs
 *
 * @rdev: radeon_device pointer
 *
 * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
 * is not used by default on newer asics (r6xx+).  On newer asics,
 * memory buffers are used for fences rather than scratch regs.
 */
static void cik_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
/**
 * cik_ring_test - basic gfx ring test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by cik_cp_gfx_resume().
 * Returns 0 on success, error on failure.
 */
int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
/**
 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
			     struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
	/* HDP flush */
	/* We should be using the new WAIT_REG_MEM special op packet here
	 * but it causes the CP to hang
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
/**
 * cik_fence_compute_ring_emit - emit a fence on the compute ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
				 struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* RELEASE_MEM - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
	/* HDP flush */
	/* We should be using the new WAIT_REG_MEM special op packet here
	 * but it causes the CP to hang
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
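
/**
 * cik_semaphore_ring_emit - emit a semaphore command on a ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: emit a wait (true) or a signal (false) command
 *
 * Emits a MEM_SEMAPHORE packet that signals or waits on the given
 * semaphore address (CIK).
 */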
void cik_semaphore_ring_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
}
/*
 * IB stuff
 */
/**
 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: radeon indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header, control = INDIRECT_BUFFER_VALID;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			next_rptr = ring->wptr + 3 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_UCONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			next_rptr = ring->wptr + 5 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}
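
	/* control dword: IB length in dwords in the low bits, plus the VM id
	 * shifted up by 24 when the IB runs in a VM context */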
	control |= ib->length_dw |
		   (ib->vm ? (ib->vm->id << 24) : 0);

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, control);
}
/**
 * cik_ib_test - basic gfx ring IB test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		/* also free the scratch reg and IB here, otherwise
		 * they would be leaked on this error path */
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs.  The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
/**
 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the gfx MEs.
 */
static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
	udelay(50);
}
/**
 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
		return -EINVAL;

	cik_cp_gfx_enable(rdev, false);
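	/* the UCODE_DATA ports presumably auto-increment: the write address
	 * is reset to 0, the whole image is streamed through the DATA
	 * register, then the address is reset again */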
	/* PFP */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __be32 *)rdev->ce_fw->data;
	WREG32(CP_CE_UCODE_ADDR, 0);
	for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_CE_UCODE_ADDR, 0);

	/* ME */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_ME_RAM_WADDR, 0);

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_CE_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
/**
 * cik_cp_gfx_start - start the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* init the CP */
	WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
	WREG32(CP_ENDIAN_SWAP, 0);
	WREG32(CP_DEVICE_ID, 1);

	cik_cp_gfx_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	radeon_ring_write(ring, 0xc000);
	radeon_ring_write(ring, 0xc000);

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	for (i = 0; i < cik_default_size; i++)
		radeon_ring_write(ring, cik_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
/**
 * cik_cp_gfx_fini - stop the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx ring and tear down the driver ring
 * info.
 */
static void cik_cp_gfx_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
/**
 * cik_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @rdev: radeon_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr;
	int r;

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(CP_RB_VMID, 0);

	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(SCRATCH_UMSK, 0);

	if (!rdev->wb.enabled)
		tmp |= RB_NO_UPDATE;

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(CP_RB0_BASE, rb_addr);
	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* start the ring */
	cik_cp_gfx_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		return r;
	}
	return 0;
}
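
/* Compute ring read/write pointer accessors: with writeback enabled the
 * pointers come from the WB buffer, otherwise the HQD registers are read
 * under srbm_mutex with the ring's me/pipe/queue selected.
 */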
u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
			      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled) {
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	} else {
		mutex_lock(&rdev->srbm_mutex);
		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
		rptr = RREG32(CP_HQD_PQ_RPTR);
		cik_srbm_select(rdev, 0, 0, 0, 0);
		mutex_unlock(&rdev->srbm_mutex);
	}

	return rptr;
}

u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
			      struct radeon_ring *ring)
{
	u32 wptr;

	if (rdev->wb.enabled) {
		wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
	} else {
		mutex_lock(&rdev->srbm_mutex);
		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
		wptr = RREG32(CP_HQD_PQ_WPTR);
		cik_srbm_select(rdev, 0, 0, 0, 0);
		mutex_unlock(&rdev->srbm_mutex);
	}

	return wptr;
}

void cik_compute_ring_set_wptr(struct radeon_device *rdev,
			       struct radeon_ring *ring)
{
	rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
	WDOORBELL32(ring->doorbell_offset, ring->wptr);
}
/**
 * cik_cp_compute_enable - enable/disable the compute CP MEs
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the compute MEs.
 */
static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_MEC_CNTL, 0);
	else
		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
	udelay(50);
}
/**
 * cik_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->mec_fw)
		return -EINVAL;

	cik_cp_compute_enable(rdev, false);

	/* MEC1 */
	fw_data = (const __be32 *)rdev->mec_fw->data;
	WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
		WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_MEC_ME1_UCODE_ADDR, 0);

	if (rdev->family == CHIP_KAVERI) {
		/* MEC2 */
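		/* MEC2 is loaded from the same mec_fw image as MEC1 */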
		fw_data = (const __be32 *)rdev->mec_fw->data;
		WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
			WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
		WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}
/**
 * cik_cp_compute_start - start the compute queues
 *
 * @rdev: radeon_device pointer
 *
 * Enable the compute queues.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_compute_start(struct radeon_device *rdev)
{
	cik_cp_compute_enable(rdev, true);
	return 0;
}

/**
 * cik_cp_compute_fini - stop the compute queues
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void cik_cp_compute_fini(struct radeon_device *rdev)
{
	int i, idx, r;

	cik_cp_compute_enable(rdev, false);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			idx = CAYMAN_RING_TYPE_CP1_INDEX;
		else
			idx = CAYMAN_RING_TYPE_CP2_INDEX;

		if (rdev->ring[idx].mqd_obj) {
			r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
			if (unlikely(r != 0))
				dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);

			radeon_bo_unpin(rdev->ring[idx].mqd_obj);
			radeon_bo_unreserve(rdev->ring[idx].mqd_obj);

			radeon_bo_unref(&rdev->ring[idx].mqd_obj);
			rdev->ring[idx].mqd_obj = NULL;
		}
	}
}
static void cik_mec_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->mec.hpd_eop_obj) {
		r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		radeon_bo_unpin(rdev->mec.hpd_eop_obj);
		radeon_bo_unreserve(rdev->mec.hpd_eop_obj);

		radeon_bo_unref(&rdev->mec.hpd_eop_obj);
		rdev->mec.hpd_eop_obj = NULL;
	}
}

#define MEC_HPD_SIZE 2048
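/* per-pipe EOP buffer size in bytes; the BO created in cik_mec_init() below
 * reserves num_mec * num_pipe * MEC_HPD_SIZE * 2 bytes of GTT */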
static int cik_mec_init(struct radeon_device *rdev)
{
	int r;
	u32 *hpd;

	/*
	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
	 */
	if (rdev->family == CHIP_KAVERI)
		rdev->mec.num_mec = 2;
	else
		rdev->mec.num_mec = 1;
	rdev->mec.num_pipe = 4;
	rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;

	if (rdev->mec.hpd_eop_obj == NULL) {
		r = radeon_bo_create(rdev,
				     rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL,
				     &rdev->mec.hpd_eop_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}

	/* clear memory.  Not sure if this is required or not */
	memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);

	radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
	radeon_bo_unreserve(rdev->mec.hpd_eop_obj);

	return 0;
}

struct hqd_registers
{
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};

struct bonaire_mqd
{
	u32 header;
	u32 dispatch_initiator;
	u32 dimensions[3];
	u32 start_idx[3];
	u32 num_threads[3];
	u32 pipeline_stat_enable;
	u32 perf_counter_enable;
	u32 pgm[2];
	u32 tba[2];
	u32 tma[2];
	u32 pgm_rsrc[2];
	u32 vmid;
	u32 resource_limits;
	u32 static_thread_mgmt01[2];
	u32 tmp_ring_size;
	u32 static_thread_mgmt23[2];
	u32 restart[3];
	u32 thread_trace_enable;
	u32 reserved1;
	u32 user_data[16];
	u32 vgtcs_invoke_count[2];
	struct hqd_registers queue_state;
	u32 dequeue_cntr;
	u32 interrupt_queue[64];
};
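
/* The MQD (memory queue descriptor) holds a persistent copy of the HQD
 * register state for one compute queue; cik_cp_compute_resume() below
 * fills it in and mirrors the values into the CP_HQD_* registers. */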
/**
 * cik_cp_compute_resume - setup the compute queue registers
 *
 * @rdev: radeon_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
	int r, i, j, idx;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct bonaire_mqd *mqd;

	r = cik_cp_compute_start(rdev);
	if (r)
		return r;

	/* fix up chicken bits */
	tmp = RREG32(CP_CPF_DEBUG);
	tmp |= (1 << 23);
	WREG32(CP_CPF_DEBUG, tmp);

	/* init the pipes */
	mutex_lock(&rdev->srbm_mutex);
	for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
		int me = (i < 4) ? 1 : 2;
		int pipe = (i < 4) ? i : (i - 4);

		eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);

		cik_srbm_select(rdev, me, pipe, 0, 0);

		/* write the EOP addr */
		WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
		WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);

		/* set the VMID assigned */
		WREG32(CP_HPD_EOP_VMID, 0);

		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
		tmp = RREG32(CP_HPD_EOP_CONTROL);
		tmp &= ~EOP_SIZE_MASK;
		tmp |= order_base_2(MEC_HPD_SIZE / 8);
		WREG32(CP_HPD_EOP_CONTROL, tmp);
	}
	cik_srbm_select(rdev, 0, 0, 0, 0);
	mutex_unlock(&rdev->srbm_mutex);
	/* init the queues.  Just two for now. */
	for (i = 0; i < 2; i++) {
		if (i == 0)
			idx = CAYMAN_RING_TYPE_CP1_INDEX;
		else
			idx = CAYMAN_RING_TYPE_CP2_INDEX;

		if (rdev->ring[idx].mqd_obj == NULL) {
			r = radeon_bo_create(rdev,
					     sizeof(struct bonaire_mqd),
					     PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_GTT, NULL,
					     &rdev->ring[idx].mqd_obj);
			if (r) {
				dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
				return r;
			}
		}

		r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
		if (unlikely(r != 0)) {
			cik_cp_compute_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
				  &mqd_gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
			cik_cp_compute_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
		if (r) {
			dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
			cik_cp_compute_fini(rdev);
			return r;
		}

		/* doorbell offset */
		rdev->ring[idx].doorbell_offset =
			(rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;

		/* init the mqd struct */
		memset(buf, 0, sizeof(struct bonaire_mqd));

		mqd = (struct bonaire_mqd *)buf;
		mqd->header = 0xC0310800;
		mqd->static_thread_mgmt01[0] = 0xffffffff;
		mqd->static_thread_mgmt01[1] = 0xffffffff;
		mqd->static_thread_mgmt23[0] = 0xffffffff;
		mqd->static_thread_mgmt23[1] = 0xffffffff;

		mutex_lock(&rdev->srbm_mutex);
		cik_srbm_select(rdev, rdev->ring[idx].me,
				rdev->ring[idx].pipe,
				rdev->ring[idx].queue, 0);

		/* disable wptr polling */
		tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
		tmp &= ~WPTR_POLL_EN;
		WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);

		/* enable doorbell? */
		mqd->queue_state.cp_hqd_pq_doorbell_control =
			RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
		if (use_doorbell)
			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
		else
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
		WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* disable the queue if it's active */
		mqd->queue_state.cp_hqd_dequeue_request = 0;
		mqd->queue_state.cp_hqd_pq_rptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = 0;
		if (RREG32(CP_HQD_ACTIVE) & 1) {
			WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
			/* use a separate counter so the outer queue loop
			 * index isn't clobbered */
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (!(RREG32(CP_HQD_ACTIVE) & 1))
					break;
				udelay(1);
			}
			WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
			WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
			WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		}

		/* set the pointer to the MQD */
		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
		WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
		WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
		/* set MQD vmid to 0 */
		mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
		mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
		WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);

		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
		hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
		WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
		WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);

		/* set up the HQD, this is similar to CP_RB0_CNTL */
		mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
		mqd->queue_state.cp_hqd_pq_control &=
			~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
		mqd->queue_state.cp_hqd_pq_control |=
			order_base_2(rdev->ring[idx].ring_size / 8);
		mqd->queue_state.cp_hqd_pq_control |=
			(order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
		mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif
		mqd->queue_state.cp_hqd_pq_control &=
			~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
		mqd->queue_state.cp_hqd_pq_control |=
			PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
		WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);

		/* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
		if (i == 0)
			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
		else
			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
		WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);

		/* set the wb address whether it's enabled or not */
		if (i == 0)
			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
		else
			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
			upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
		WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);

		/* enable the doorbell if requested */
		if (use_doorbell) {
			mqd->queue_state.cp_hqd_pq_doorbell_control =
				RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~(DOORBELL_SOURCE | DOORBELL_HIT);
		} else {
			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
		}
		WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
		rdev->ring[idx].wptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
		WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
		mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;

		/* set the vmid for the queue */
		mqd->queue_state.cp_hqd_vmid = 0;
		WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);

		/* activate the queue */
		mqd->queue_state.cp_hqd_active = 1;
		WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);

		cik_srbm_select(rdev, 0, 0, 0, 0);
		mutex_unlock(&rdev->srbm_mutex);

		radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
		radeon_bo_unreserve(rdev->ring[idx].mqd_obj);

		rdev->ring[idx].ready = true;
		r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
		if (r)
			rdev->ring[idx].ready = false;
	}

	return 0;
}
static void cik_cp_enable(struct radeon_device *rdev, bool enable)
{
	cik_cp_gfx_enable(rdev, enable);
	cik_cp_compute_enable(rdev, enable);
}

static int cik_cp_load_microcode(struct radeon_device *rdev)
{
	int r;

	r = cik_cp_gfx_load_microcode(rdev);
	if (r)
		return r;
	r = cik_cp_compute_load_microcode(rdev);
	if (r)
		return r;
	return 0;
}

static void cik_cp_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_fini(rdev);
	cik_cp_compute_fini(rdev);
}

static int cik_cp_resume(struct radeon_device *rdev)
{
	int r;

	cik_enable_gui_idle_interrupt(rdev, false);

	r = cik_cp_load_microcode(rdev);
	if (r)
		return r;

	r = cik_cp_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_cp_compute_resume(rdev);
	if (r)
		return r;

	cik_enable_gui_idle_interrupt(rdev, true);

	return 0;
}
  3789. static void cik_print_gpu_status_regs(struct radeon_device *rdev)
  3790. {
  3791. dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
  3792. RREG32(GRBM_STATUS));
  3793. dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
  3794. RREG32(GRBM_STATUS2));
  3795. dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
  3796. RREG32(GRBM_STATUS_SE0));
  3797. dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
  3798. RREG32(GRBM_STATUS_SE1));
  3799. dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
  3800. RREG32(GRBM_STATUS_SE2));
  3801. dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
  3802. RREG32(GRBM_STATUS_SE3));
  3803. dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
  3804. RREG32(SRBM_STATUS));
  3805. dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
  3806. RREG32(SRBM_STATUS2));
  3807. dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
  3808. RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
  3809. dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
  3810. RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
  3811. dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
  3812. dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
  3813. RREG32(CP_STALLED_STAT1));
  3814. dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
  3815. RREG32(CP_STALLED_STAT2));
  3816. dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
  3817. RREG32(CP_STALLED_STAT3));
  3818. dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
  3819. RREG32(CP_CPF_BUSY_STAT));
  3820. dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
  3821. RREG32(CP_CPF_STALLED_STAT1));
  3822. dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
  3823. dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
  3824. dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
  3825. RREG32(CP_CPC_STALLED_STAT1));
  3826. dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
  3827. }
/**
 * cik_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by cik_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   BCI_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(GRBM_STATUS2);
	if (tmp & RLC_BUSY)
		reset_mask |= RADEON_RESET_RLC;

	/* SDMA0_STATUS_REG */
	tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SDMA1_STATUS_REG */
	tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
	if (!(tmp & SDMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & SDMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & SDMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
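/*
 * A typical use of the returned mask, mirroring what cik_asic_reset()
 * does further down:
 *
 *	u32 mask = cik_gpu_check_soft_reset(rdev);
 *
 *	if (mask)
 *		cik_gpu_soft_reset(rdev, mask);
 */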
/**
 * cik_gpu_soft_reset - soft reset GPU
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	cik_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* stop the rlc */
	cik_rlc_stop(rdev);

	/* Disable GFX parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* Disable MEC parsing/prefetching */
	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
		grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_SDMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_SDMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	cik_print_gpu_status_regs(rdev);
}
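/*
 * The GRBM/SRBM resets above follow a set/clear pulse pattern: OR the
 * reset bits in, read the register back so the posted write actually
 * lands, hold reset for ~50us, then clear the bits and read back again.
 * Without the read-backs the writes could still be sitting in a write
 * buffer when the delay starts.
 */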
/**
 * cik_asic_reset - soft reset GPU
 *
 * @rdev: radeon_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
int cik_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = cik_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cik_gpu_soft_reset(rdev, reset_mask);

	reset_mask = cik_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * cik_gfx_is_lockup - check if the 3D engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the 3D engine is locked up (CIK).
 * Returns true if the engine is locked, false if not.
 */
bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
/* MC */
/**
 * cik_mc_program - program the GPU memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void cik_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
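/*
 * MC_VM_FB_LOCATION above packs the framebuffer range in 16MB units:
 * bits 31:16 hold vram_end >> 24 and bits 15:0 hold vram_start >> 24.
 * E.g. a 1GB framebuffer based at 0 (vram_end = 0x3FFFFFFF) encodes as
 * 0x003F0000.
 */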
/**
 * cik_mc_init - initialize the memory controller driver params
 *
 * @rdev: radeon_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int cik_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	si_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
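/*
 * The width computed above is just channels * channel size: e.g. a
 * NOOFCHAN field of 3 decodes to 8 channels, which with CHANSIZE set
 * (64-bit channels) gives a 512-bit interface, while 2 channels of
 * 64 bits would give a 128-bit bus.
 */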
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the radeon vm/hsa code.
 */
/**
 * cik_pcie_gart_tlb_flush - gart tlb flush callback
 *
 * @rdev: radeon_device pointer
 *
 * Flush the TLB for the VMID 0 page table (CIK).
 */
void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts 0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 0x1);
}

/**
 * cik_pcie_gart_enable - gart enable
 *
 * @rdev: radeon_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int cik_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* TC cache setup ??? */
	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L1_STORE_POLICY, 0);

	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);

	WREG32(TC_CFG_L1_VOLATILE, 0);
	WREG32(TC_CFG_L2_VOLATILE, 0);

	if (rdev->family == CHIP_KAVERI) {
		u32 tmp = RREG32(CHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(CHUB_CONTROL, tmp);
	}

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&rdev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		cik_srbm_select(rdev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(SH_MEM_CONFIG, 0);
		WREG32(SH_MEM_APE1_BASE, 1);
		WREG32(SH_MEM_APE1_LIMIT, 0);
		WREG32(SH_MEM_BASES, 0);
		/* SDMA GFX */
		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
		/* XXX SDMA RLC - todo */
	}
	cik_srbm_select(rdev, 0, 0, 0, 0);
	mutex_unlock(&rdev->srbm_mutex);

	cik_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
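/*
 * The per-VMID page table base registers written above come in two
 * banks: VMIDs 0-7 at VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i*4 and
 * VMIDs 8-15 at VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i-8)*4, which is
 * why cik_vm_flush() below selects the register with the same i < 8
 * test.
 */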
/**
 * cik_pcie_gart_disable - gart disable
 *
 * @rdev: radeon_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void cik_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL,
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

/**
 * cik_pcie_gart_fini - vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void cik_pcie_gart_fini(struct radeon_device *rdev)
{
	cik_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

/* vm parser */
/**
 * cik_ib_parse - vm ib_parse callback
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer pointer
 *
 * CIK uses hw IB checking so this is a nop (CIK).
 */
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	return 0;
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the radeon vm/hsa code.
 */
/**
 * cik_vm_init - cik vm init callback
 *
 * @rdev: radeon_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
int cik_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 16;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;

	return 0;
}
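/*
 * On IGPs the framebuffer lives in stolen system memory, so
 * MC_VM_FB_OFFSET reports where that carve-out sits; the shift by 22
 * suggests the register counts in 4MB units (2^22 bytes), and the
 * resulting byte offset is added in when VRAM pages are mapped for
 * VMIDs 1-15.
 */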
/**
 * cik_vm_fini - cik vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
void cik_vm_fini(struct radeon_device *rdev)
{
}

/**
 * cik_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void cik_vm_decode_fault(struct radeon_device *rdev,
				u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_client, mc_id);
}
/**
 * cik_vm_flush - cik vm flush using the CP
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
 */
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	if (vm->id < 8) {
		radeon_ring_write(ring,
				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring,
				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* SH_MEM_BASES */
	radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
	radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */

	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, VMID(0));

	/* HDP flush */
	/* We should be using the WAIT_REG_MEM packet here like in
	 * cik_fence_ring_emit(), but it causes the CP to hang in this
	 * context...
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);

	/* bits 0-15 are the VM contexts 0-15 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 1 << vm->id);

	/* compute doesn't have PFP */
	if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		radeon_ring_write(ring, 0x0);
	}
}
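/*
 * Every register update in the flush above uses the same WRITE_DATA
 * shape: the header, an engine/destination select, the register's
 * dword offset (hence the >> 2 on each byte address), an address-high
 * dword of 0, and finally the payload.  DST_SEL(0) routes the write to
 * the register bus rather than memory.
 */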
/**
 * cik_vm_set_page - update the page tables using CP or sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using CP or sDMA (CIK).
 */
void cik_vm_set_page(struct radeon_device *rdev,
		     struct radeon_ib *ib,
		     uint64_t pe,
		     uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		/* CP */
		while (count) {
			ndw = 2 + count * 2;
			if (ndw > 0x3FFE)
				ndw = 0x3FFE;

			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
						    WRITE_DATA_DST_SEL(1));
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		/* DMA */
		cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
	}
}
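/*
 * Packet sizing in the CP path above: each WRITE_DATA body carries the
 * 2-dword destination address plus 2 dwords per 64-bit PTE, hence
 * ndw = 2 + count * 2, and the 0x3FFE cap keeps ndw within the
 * packet's count field.  E.g. updating 3 entries emits an 8-dword
 * body: 2 address dwords plus 3 * 2 PTE dwords.
 */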
/*
 * RLC
 * The RLC is a multi-purpose microengine that handles a
 * variety of functions, the most important of which is
 * the interrupt controller.
 */
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable)
{
	u32 tmp = RREG32(CP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	else
		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
}

static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
{
	u32 tmp;

	tmp = RREG32(RLC_LB_CNTL);
	if (enable)
		tmp |= LOAD_BALANCE_ENABLE;
	else
		tmp &= ~LOAD_BALANCE_ENABLE;
	WREG32(RLC_LB_CNTL, tmp);
}

static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
{
	u32 i, j, k;
	u32 mask;

	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			for (k = 0; k < rdev->usec_timeout; k++) {
				if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
	for (k = 0; k < rdev->usec_timeout; k++) {
		if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(RLC_CNTL);
	if (tmp != rlc)
		WREG32(RLC_CNTL, rlc);
}

static u32 cik_halt_rlc(struct radeon_device *rdev)
{
	u32 data, orig;

	orig = data = RREG32(RLC_CNTL);

	if (data & RLC_ENABLE) {
		u32 i;

		data &= ~RLC_ENABLE;
		WREG32(RLC_CNTL, data);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
				break;
			udelay(1);
		}

		cik_wait_for_rlc_serdes(rdev);
	}

	return orig;
}

void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
{
	u32 tmp, i, mask;

	tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
	WREG32(RLC_GPR_REG2, tmp);

	mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(RLC_GPM_STAT) & mask) == mask)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
			break;
		udelay(1);
	}
}

void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
	WREG32(RLC_GPR_REG2, tmp);
}
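/*
 * The safe-mode handshake above runs through RLC_GPR_REG2: the driver
 * writes REQ plus a message code and the RLC acknowledges by clearing
 * REQ.  Entry additionally waits on GFX_POWER_STATUS and
 * GFX_CLOCK_STATUS so registers are safely accessible before the
 * caller proceeds; exit is fire-and-forget.
 */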
/**
 * cik_rlc_stop - stop the RLC ME
 *
 * @rdev: radeon_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK).
 */
static void cik_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);

	cik_enable_gui_idle_interrupt(rdev, false);

	cik_wait_for_rlc_serdes(rdev);
}

/**
 * cik_rlc_start - start the RLC ME
 *
 * @rdev: radeon_device pointer
 *
 * Unhalt the RLC ME (MicroEngine) (CIK).
 */
static void cik_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);

	cik_enable_gui_idle_interrupt(rdev, true);

	udelay(50);
}

/**
 * cik_rlc_resume - setup the RLC hw
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_rlc_resume(struct radeon_device *rdev)
{
	u32 i, size, tmp;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
	default:
		size = BONAIRE_RLC_UCODE_SIZE;
		break;
	case CHIP_KAVERI:
		size = KV_RLC_UCODE_SIZE;
		break;
	case CHIP_KABINI:
		size = KB_RLC_UCODE_SIZE;
		break;
	}

	cik_rlc_stop(rdev);

	/* disable CG */
	tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(RLC_CGCG_CGLS_CTRL, tmp);

	si_rlc_reset(rdev);

	cik_init_pg(rdev);

	cik_init_cg(rdev);

	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_CNTR_MAX, 0x00008000);

	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(RLC_LB_PARAMS, 0x00600408);
	WREG32(RLC_LB_CNTL, 0x80000004);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	WREG32(RLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < size; i++)
		WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(RLC_GPM_UCODE_ADDR, 0);

	/* XXX - find out what chips support lbpw */
	cik_enable_lbpw(rdev, false);

	if (rdev->family == CHIP_BONAIRE)
		WREG32(RLC_DRIVER_DMA_STATUS, 0);

	cik_rlc_start(rdev);

	return 0;
}
static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
		cik_enable_gui_idle_interrupt(rdev, true);

		tmp = cik_halt_rlc(rdev);

		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
		WREG32(RLC_SERDES_WR_CTRL, tmp2);

		cik_update_rlc(rdev, tmp);

		data |= CGCG_EN | CGLS_EN;
	} else {
		cik_enable_gui_idle_interrupt(rdev, false);

		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);

		data &= ~(CGCG_EN | CGLS_EN);
	}

	if (orig != data)
		WREG32(RLC_CGCG_CGLS_CTRL, data);
}

static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
			if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(CP_MEM_SLP_CNTL);
				data |= CP_MEM_LS_EN;
				if (orig != data)
					WREG32(CP_MEM_SLP_CNTL, data);
			}
		}

		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data &= 0xfffffffd;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		tmp = cik_halt_rlc(rdev);

		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
		WREG32(RLC_SERDES_WR_CTRL, data);

		cik_update_rlc(rdev, tmp);

		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(CGTS_SM_CTRL_REG);
			data &= ~SM_MODE_MASK;
			data |= SM_MODE(0x2);
			data |= SM_MODE_ENABLE;
			data &= ~CGTS_OVERRIDE;
			if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
			    (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_LS_OVERRIDE;
			data &= ~ON_MONITOR_ADD_MASK;
			data |= ON_MONITOR_ADD_EN;
			data |= ON_MONITOR_ADD(0x96);
			if (orig != data)
				WREG32(CGTS_SM_CTRL_REG, data);
		}
	} else {
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000002;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(RLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_LS_EN) {
			data &= ~RLC_MEM_LS_EN;
			WREG32(RLC_MEM_SLP_CNTL, data);
		}

		data = RREG32(CP_MEM_SLP_CNTL);
		if (data & CP_MEM_LS_EN) {
			data &= ~CP_MEM_LS_EN;
			WREG32(CP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		tmp = cik_halt_rlc(rdev);

		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
		WREG32(RLC_SERDES_WR_CTRL, data);

		cik_update_rlc(rdev, tmp);
	}
}
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static void cik_enable_mc_ls(struct radeon_device *rdev,
			     bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
			data |= MC_LS_ENABLE;
		else
			data &= ~MC_LS_ENABLE;
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void cik_enable_mc_mgcg(struct radeon_device *rdev,
			       bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
			data |= MC_CG_ENABLE;
		else
			data &= ~MC_CG_ENABLE;
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct radeon_device *rdev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}
static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
				bool enable)
{
	u32 orig, data;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		data = 0xfff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(UVD_CGC_CTRL);
		data |= DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(UVD_CGC_CTRL);
		data &= ~DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);
	}
}

static void cik_enable_bif_mgls(struct radeon_device *rdev,
				bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
	else
		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);

	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);
}

static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
				bool enable)
{
	u32 orig, data;

	orig = data = RREG32(HDP_HOST_PATH_CNTL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
		data &= ~CLOCK_GATING_DIS;
	else
		data |= CLOCK_GATING_DIS;

	if (orig != data)
		WREG32(HDP_HOST_PATH_CNTL, data);
}

static void cik_enable_hdp_ls(struct radeon_device *rdev,
			      bool enable)
{
	u32 orig, data;

	orig = data = RREG32(HDP_MEM_POWER_LS);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
		data |= HDP_LS_ENABLE;
	else
		data &= ~HDP_LS_ENABLE;

	if (orig != data)
		WREG32(HDP_MEM_POWER_LS, data);
}

void cik_update_cg(struct radeon_device *rdev,
		   u32 block, bool enable)
{
	if (block & RADEON_CG_BLOCK_GFX) {
		cik_enable_gui_idle_interrupt(rdev, false);
		/* order matters! */
		if (enable) {
			cik_enable_mgcg(rdev, true);
			cik_enable_cgcg(rdev, true);
		} else {
			cik_enable_cgcg(rdev, false);
			cik_enable_mgcg(rdev, false);
		}
		cik_enable_gui_idle_interrupt(rdev, true);
	}

	if (block & RADEON_CG_BLOCK_MC) {
		if (!(rdev->flags & RADEON_IS_IGP)) {
			cik_enable_mc_mgcg(rdev, enable);
			cik_enable_mc_ls(rdev, enable);
		}
	}

	if (block & RADEON_CG_BLOCK_SDMA) {
		cik_enable_sdma_mgcg(rdev, enable);
		cik_enable_sdma_mgls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_BIF) {
		cik_enable_bif_mgls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_UVD) {
		if (rdev->has_uvd)
			cik_enable_uvd_mgcg(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_HDP) {
		cik_enable_hdp_mgcg(rdev, enable);
		cik_enable_hdp_ls(rdev, enable);
	}
}

static void cik_init_cg(struct radeon_device *rdev)
{
	cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);

	if (rdev->has_uvd)
		si_init_uvd_internal_cg(rdev);

	cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), true);
}

static void cik_fini_cg(struct radeon_device *rdev)
{
	cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), false);

	cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
}
static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
					  bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
		data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
	else
		data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
					  bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
		data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
	else
		data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
		data &= ~DISABLE_CP_PG;
	else
		data |= DISABLE_CP_PG;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
		data &= ~DISABLE_GDS_PG;
	else
		data |= DISABLE_GDS_PG;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

#define CP_ME_TABLE_SIZE    96
#define CP_ME_TABLE_OFFSET  2048
#define CP_MEC_TABLE_OFFSET 4096

void cik_init_cp_pg_table(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset;

	if (rdev->family == CHIP_KAVERI)
		max_me = 5;

	if (rdev->rlc.cp_table_ptr == NULL)
		return;

	/* write the cp table buffer */
	dst_ptr = rdev->rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			fw_data = (const __be32 *)rdev->ce_fw->data;
			table_offset = CP_ME_TABLE_OFFSET;
		} else if (me == 1) {
			fw_data = (const __be32 *)rdev->pfp_fw->data;
			table_offset = CP_ME_TABLE_OFFSET;
		} else if (me == 2) {
			fw_data = (const __be32 *)rdev->me_fw->data;
			table_offset = CP_ME_TABLE_OFFSET;
		} else {
			fw_data = (const __be32 *)rdev->mec_fw->data;
			table_offset = CP_MEC_TABLE_OFFSET;
		}

		for (i = 0; i < CP_ME_TABLE_SIZE; i++) {
			dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
		}
		bo_offset += CP_ME_TABLE_SIZE;
	}
}
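/*
 * Buffer layout written above: one CP_ME_TABLE_SIZE-dword slot per
 * microengine, packed back to back (CE, PFP, ME, then the MEC table
 * once, or twice on Kaveri where max_me is 5).  Within each firmware
 * image the table sits at a fixed dword offset: CP_ME_TABLE_OFFSET for
 * the gfx engines, CP_MEC_TABLE_OFFSET for the MECs.
 */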
static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
				bool enable)
{
	u32 data, orig;

	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
		orig = data = RREG32(RLC_PG_CNTL);
		data |= GFX_PG_ENABLE;
		if (orig != data)
			WREG32(RLC_PG_CNTL, data);

		orig = data = RREG32(RLC_AUTO_PG_CTRL);
		data |= AUTO_PG_EN;
		if (orig != data)
			WREG32(RLC_AUTO_PG_CTRL, data);
	} else {
		orig = data = RREG32(RLC_PG_CNTL);
		data &= ~GFX_PG_ENABLE;
		if (orig != data)
			WREG32(RLC_PG_CNTL, data);

		orig = data = RREG32(RLC_AUTO_PG_CTRL);
		data &= ~AUTO_PG_EN;
		if (orig != data)
			WREG32(RLC_AUTO_PG_CTRL, data);

		data = RREG32(DB_RENDER_CONTROL);
	}
}

static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
{
	u32 mask = 0, tmp, tmp1;
	int i;

	cik_select_se_sh(rdev, se, sh);
	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	tmp &= 0xffff0000;

	tmp |= tmp1;
	tmp >>= 16;

	for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
		mask <<= 1;
		mask |= 1;
	}

	return (~tmp) & mask;
}
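/*
 * Both registers read above keep per-CU disable bits in their upper 16
 * bits (one reflecting the fused harvest config, one the user config);
 * merging them and shifting down yields a disable mask, so the active
 * bitmap is its complement clipped to max_cu_per_sh bits.  E.g. with
 * 8 CUs per SH and disable bits 0x5 this returns 0xfa.
 */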
static void cik_init_ao_cu_mask(struct radeon_device *rdev)
{
	u32 i, j, k, active_cu_number = 0;
	u32 mask, counter, cu_bitmap;
	u32 tmp = 0;

	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
				if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
					if (counter < 2)
						cu_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}

			active_cu_number += counter;
			tmp |= (cu_bitmap << (i * 16 + j * 8));
		}
	}

	WREG32(RLC_PG_AO_CU_MASK, tmp);

	tmp = RREG32(RLC_MAX_PG_CU);
	tmp &= ~MAX_PU_CU_MASK;
	tmp |= MAX_PU_CU(active_cu_number);
	WREG32(RLC_MAX_PG_CU, tmp);
}

static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
				       bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
		data |= STATIC_PER_CU_PG_ENABLE;
	else
		data &= ~STATIC_PER_CU_PG_ENABLE;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
					bool enable)
{
	u32 data, orig;

	orig = data = RREG32(RLC_PG_CNTL);
	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
		data |= DYN_PER_CU_PG_ENABLE;
	else
		data &= ~DYN_PER_CU_PG_ENABLE;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);
}

#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D

static void cik_init_gfx_cgpg(struct radeon_device *rdev)
{
	u32 data, orig;
	u32 i;

	if (rdev->rlc.cs_data) {
		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
		WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
		WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
	} else {
		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		for (i = 0; i < 3; i++)
			WREG32(RLC_GPM_SCRATCH_DATA, 0);
	}
	if (rdev->rlc.reg_list) {
		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
		for (i = 0; i < rdev->rlc.reg_list_size; i++)
			WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
	}

	orig = data = RREG32(RLC_PG_CNTL);
	data |= GFX_PG_SRC;
	if (orig != data)
		WREG32(RLC_PG_CNTL, data);

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
	WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);

	data = RREG32(CP_RB_WPTR_POLL_CNTL);
	data &= ~IDLE_POLL_COUNT_MASK;
	data |= IDLE_POLL_COUNT(0x60);
	WREG32(CP_RB_WPTR_POLL_CNTL, data);

	data = 0x10101010;
	WREG32(RLC_PG_DELAY, data);

	data = RREG32(RLC_PG_DELAY_2);
	data &= ~0xff;
	data |= 0x3;
	WREG32(RLC_PG_DELAY_2, data);

	data = RREG32(RLC_AUTO_PG_CTRL);
	data &= ~GRBM_REG_SGIT_MASK;
	data |= GRBM_REG_SGIT(0x700);
	WREG32(RLC_AUTO_PG_CTRL, data);
}

static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
{
	cik_enable_gfx_cgpg(rdev, enable);
	cik_enable_gfx_static_mgpg(rdev, enable);
	cik_enable_gfx_dynamic_mgpg(rdev, enable);
}

u32 cik_get_csb_size(struct radeon_device *rdev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return 0;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
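/*
 * The dword accounting above mirrors, packet for packet, what
 * cik_get_csb_buffer() emits below: 2 for the begin-clear-state
 * preamble, 3 for CONTEXT_CONTROL, 2 + reg_count per SECT_CONTEXT
 * extent, 4 for the two raster config registers, 2 for the end
 * preamble and 2 for CLEAR_STATE.  The two functions have to stay in
 * sync.
 */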
void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;

	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
	buffer[count++] = 0x80000000;
	buffer[count++] = 0x80000000;

	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
				buffer[count++] = ext->reg_index - 0xa000;
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = ext->extent[i];
			} else {
				return;
			}
		}
	}

	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
	switch (rdev->family) {
	case CHIP_BONAIRE:
		buffer[count++] = 0x16000012;
		buffer[count++] = 0x00000000;
		break;
	case CHIP_KAVERI:
		buffer[count++] = 0x00000000; /* XXX */
		buffer[count++] = 0x00000000;
		break;
	case CHIP_KABINI:
		buffer[count++] = 0x00000000; /* XXX */
		buffer[count++] = 0x00000000;
		break;
	default:
		buffer[count++] = 0x00000000;
		buffer[count++] = 0x00000000;
		break;
	}

	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;

	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
	buffer[count++] = 0;
}

static void cik_init_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		cik_enable_sck_slowdown_on_pu(rdev, true);
		cik_enable_sck_slowdown_on_pd(rdev, true);
		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
			cik_init_gfx_cgpg(rdev);
			cik_enable_cp_pg(rdev, true);
			cik_enable_gds_pg(rdev, true);
		}
		cik_init_ao_cu_mask(rdev);
		cik_update_gfx_pg(rdev, true);
	}
}

static void cik_fini_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		cik_update_gfx_pg(rdev, false);
		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
			cik_enable_cp_pg(rdev, false);
			cik_enable_gds_pg(rdev, false);
		}
	}
}
/*
 * Interrupts
 * Starting with r6xx, interrupts are handled via a ring buffer.
 * Ring buffers are areas of GPU accessible memory that the GPU
 * writes interrupt vectors into and the host reads vectors out of.
 * There is a rptr (read pointer) that determines where the
 * host is currently reading, and a wptr (write pointer)
 * which determines where the GPU has written. When the
 * pointers are equal, the ring is idle. When the GPU
 * writes vectors to the ring buffer, it increments the
 * wptr. When there is an interrupt, the host then starts
 * fetching vectors and processing them until the pointers are
 * equal again, at which point it updates the rptr.
 */
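/*
 * A rough sketch of the host side of that protocol (illustrative only,
 * process_vector() and ih_ring[] are made-up names; the real loop is
 * cik_irq_process() later in this file):
 *
 *	wptr = RREG32(IH_RB_WPTR);
 *	while (rptr != wptr)
 *		process_vector(ih_ring[rptr++]);
 *	WREG32(IH_RB_RPTR, rptr);
 */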
/**
 * cik_enable_interrupts - Enable the interrupt ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Enable the interrupt ring buffer (CIK).
 */
static void cik_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

/**
 * cik_disable_interrupts - Disable the interrupt ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Disable the interrupt ring buffer (CIK).
 */
static void cik_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}

/**
 * cik_disable_interrupt_state - Disable all interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Clear all interrupt enable bits used by the driver (CIK).
 */
static void cik_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* gfx ring */
	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	/* sdma */
	tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	/* compute queues */
	WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
	/* grbm */
	WREG32(GRBM_INT_CNTL, 0);
	/* vline/vblank, etc. */
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	/* dac hotplug */
	WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
	/* digital hotplug */
	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}

/**
 * cik_irq_init - init and enable the interrupt ring
  5437. *
  5438. * @rdev: radeon_device pointer
  5439. *
  5440. * Allocate a ring buffer for the interrupt controller,
5441. * enable the RLC, disable interrupts, set up the IH
5442. * ring buffer and enable it (CIK).
5443. * Called at device load and resume.
  5444. * Returns 0 for success, errors for failure.
  5445. */
  5446. static int cik_irq_init(struct radeon_device *rdev)
  5447. {
  5448. int ret = 0;
  5449. int rb_bufsz;
  5450. u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
  5451. /* allocate ring */
  5452. ret = r600_ih_ring_alloc(rdev);
  5453. if (ret)
  5454. return ret;
  5455. /* disable irqs */
  5456. cik_disable_interrupts(rdev);
  5457. /* init rlc */
  5458. ret = cik_rlc_resume(rdev);
  5459. if (ret) {
  5460. r600_ih_ring_fini(rdev);
  5461. return ret;
  5462. }
  5463. /* setup interrupt control */
  5464. /* XXX this should actually be a bus address, not an MC address. same on older asics */
  5465. WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
  5466. interrupt_cntl = RREG32(INTERRUPT_CNTL);
  5467. /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
  5468. * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
  5469. */
  5470. interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
  5471. /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
  5472. interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
  5473. WREG32(INTERRUPT_CNTL, interrupt_cntl);
  5474. WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
  5475. rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
  5476. ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
  5477. IH_WPTR_OVERFLOW_CLEAR |
  5478. (rb_bufsz << 1));
  5479. if (rdev->wb.enabled)
  5480. ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
  5481. /* set the writeback address whether it's enabled or not */
  5482. WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
  5483. WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
  5484. WREG32(IH_RB_CNTL, ih_rb_cntl);
  5485. /* set rptr, wptr to 0 */
  5486. WREG32(IH_RB_RPTR, 0);
  5487. WREG32(IH_RB_WPTR, 0);
  5488. /* Default settings for IH_CNTL (disabled at first) */
  5489. ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
  5490. /* RPTR_REARM only works if msi's are enabled */
  5491. if (rdev->msi_enabled)
  5492. ih_cntl |= RPTR_REARM;
  5493. WREG32(IH_CNTL, ih_cntl);
  5494. /* force the active interrupt state to all disabled */
  5495. cik_disable_interrupt_state(rdev);
  5496. pci_set_master(rdev->pdev);
  5497. /* enable irqs */
  5498. cik_enable_interrupts(rdev);
  5499. return ret;
  5500. }
  5501. /**
  5502. * cik_irq_set - enable/disable interrupt sources
  5503. *
  5504. * @rdev: radeon_device pointer
  5505. *
  5506. * Enable interrupt sources on the GPU (vblanks, hpd,
  5507. * etc.) (CIK).
  5508. * Returns 0 for success, errors for failure.
  5509. */
  5510. int cik_irq_set(struct radeon_device *rdev)
  5511. {
  5512. u32 cp_int_cntl;
  5513. u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
  5514. u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
  5515. u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
  5516. u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
  5517. u32 grbm_int_cntl = 0;
  5518. u32 dma_cntl, dma_cntl1;
  5519. u32 thermal_int;
  5520. if (!rdev->irq.installed) {
  5521. WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
  5522. return -EINVAL;
  5523. }
  5524. /* don't enable anything if the ih is disabled */
  5525. if (!rdev->ih.enabled) {
  5526. cik_disable_interrupts(rdev);
  5527. /* force the active interrupt state to all disabled */
  5528. cik_disable_interrupt_state(rdev);
  5529. return 0;
  5530. }
  5531. cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
  5532. (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5533. cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
  5534. hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5535. hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5536. hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5537. hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5538. hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5539. hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
  5540. dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
  5541. dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
  5542. cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5543. cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5544. cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5545. cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5546. cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5547. cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5548. cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5549. cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  5550. if (rdev->flags & RADEON_IS_IGP)
  5551. thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
  5552. ~(THERM_INTH_MASK | THERM_INTL_MASK);
  5553. else
  5554. thermal_int = RREG32_SMC(CG_THERMAL_INT) &
  5555. ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
  5556. /* enable CP interrupts on all rings */
  5557. if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
  5558. DRM_DEBUG("cik_irq_set: sw int gfx\n");
  5559. cp_int_cntl |= TIME_STAMP_INT_ENABLE;
  5560. }
  5561. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
  5562. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  5563. DRM_DEBUG("si_irq_set: sw int cp1\n");
  5564. if (ring->me == 1) {
  5565. switch (ring->pipe) {
  5566. case 0:
  5567. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  5568. break;
  5569. case 1:
  5570. cp_m1p1 |= TIME_STAMP_INT_ENABLE;
  5571. break;
  5572. case 2:
  5573. cp_m1p2 |= TIME_STAMP_INT_ENABLE;
  5574. break;
  5575. case 3:
5576. cp_m1p3 |= TIME_STAMP_INT_ENABLE;
  5577. break;
  5578. default:
5579. DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
  5580. break;
  5581. }
  5582. } else if (ring->me == 2) {
  5583. switch (ring->pipe) {
  5584. case 0:
  5585. cp_m2p0 |= TIME_STAMP_INT_ENABLE;
  5586. break;
  5587. case 1:
  5588. cp_m2p1 |= TIME_STAMP_INT_ENABLE;
  5589. break;
  5590. case 2:
  5591. cp_m2p2 |= TIME_STAMP_INT_ENABLE;
  5592. break;
  5593. case 3:
5594. cp_m2p3 |= TIME_STAMP_INT_ENABLE;
  5595. break;
  5596. default:
5597. DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
  5598. break;
  5599. }
  5600. } else {
5601. DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
  5602. }
  5603. }
  5604. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
  5605. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
5606. DRM_DEBUG("cik_irq_set: sw int cp2\n");
  5607. if (ring->me == 1) {
  5608. switch (ring->pipe) {
  5609. case 0:
  5610. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  5611. break;
  5612. case 1:
  5613. cp_m1p1 |= TIME_STAMP_INT_ENABLE;
  5614. break;
  5615. case 2:
  5616. cp_m1p2 |= TIME_STAMP_INT_ENABLE;
  5617. break;
  5618. case 3:
5619. cp_m1p3 |= TIME_STAMP_INT_ENABLE;
  5620. break;
  5621. default:
5622. DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
  5623. break;
  5624. }
  5625. } else if (ring->me == 2) {
  5626. switch (ring->pipe) {
  5627. case 0:
  5628. cp_m2p0 |= TIME_STAMP_INT_ENABLE;
  5629. break;
  5630. case 1:
  5631. cp_m2p1 |= TIME_STAMP_INT_ENABLE;
  5632. break;
  5633. case 2:
  5634. cp_m2p2 |= TIME_STAMP_INT_ENABLE;
  5635. break;
  5636. case 3:
5637. cp_m2p3 |= TIME_STAMP_INT_ENABLE;
  5638. break;
  5639. default:
5640. DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
  5641. break;
  5642. }
  5643. } else {
5644. DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
  5645. }
  5646. }
  5647. if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
  5648. DRM_DEBUG("cik_irq_set: sw int dma\n");
  5649. dma_cntl |= TRAP_ENABLE;
  5650. }
  5651. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
  5652. DRM_DEBUG("cik_irq_set: sw int dma1\n");
  5653. dma_cntl1 |= TRAP_ENABLE;
  5654. }
  5655. if (rdev->irq.crtc_vblank_int[0] ||
  5656. atomic_read(&rdev->irq.pflip[0])) {
  5657. DRM_DEBUG("cik_irq_set: vblank 0\n");
  5658. crtc1 |= VBLANK_INTERRUPT_MASK;
  5659. }
  5660. if (rdev->irq.crtc_vblank_int[1] ||
  5661. atomic_read(&rdev->irq.pflip[1])) {
  5662. DRM_DEBUG("cik_irq_set: vblank 1\n");
  5663. crtc2 |= VBLANK_INTERRUPT_MASK;
  5664. }
  5665. if (rdev->irq.crtc_vblank_int[2] ||
  5666. atomic_read(&rdev->irq.pflip[2])) {
  5667. DRM_DEBUG("cik_irq_set: vblank 2\n");
  5668. crtc3 |= VBLANK_INTERRUPT_MASK;
  5669. }
  5670. if (rdev->irq.crtc_vblank_int[3] ||
  5671. atomic_read(&rdev->irq.pflip[3])) {
  5672. DRM_DEBUG("cik_irq_set: vblank 3\n");
  5673. crtc4 |= VBLANK_INTERRUPT_MASK;
  5674. }
  5675. if (rdev->irq.crtc_vblank_int[4] ||
  5676. atomic_read(&rdev->irq.pflip[4])) {
  5677. DRM_DEBUG("cik_irq_set: vblank 4\n");
  5678. crtc5 |= VBLANK_INTERRUPT_MASK;
  5679. }
  5680. if (rdev->irq.crtc_vblank_int[5] ||
  5681. atomic_read(&rdev->irq.pflip[5])) {
  5682. DRM_DEBUG("cik_irq_set: vblank 5\n");
  5683. crtc6 |= VBLANK_INTERRUPT_MASK;
  5684. }
  5685. if (rdev->irq.hpd[0]) {
  5686. DRM_DEBUG("cik_irq_set: hpd 1\n");
  5687. hpd1 |= DC_HPDx_INT_EN;
  5688. }
  5689. if (rdev->irq.hpd[1]) {
  5690. DRM_DEBUG("cik_irq_set: hpd 2\n");
  5691. hpd2 |= DC_HPDx_INT_EN;
  5692. }
  5693. if (rdev->irq.hpd[2]) {
  5694. DRM_DEBUG("cik_irq_set: hpd 3\n");
  5695. hpd3 |= DC_HPDx_INT_EN;
  5696. }
  5697. if (rdev->irq.hpd[3]) {
  5698. DRM_DEBUG("cik_irq_set: hpd 4\n");
  5699. hpd4 |= DC_HPDx_INT_EN;
  5700. }
  5701. if (rdev->irq.hpd[4]) {
  5702. DRM_DEBUG("cik_irq_set: hpd 5\n");
  5703. hpd5 |= DC_HPDx_INT_EN;
  5704. }
  5705. if (rdev->irq.hpd[5]) {
  5706. DRM_DEBUG("cik_irq_set: hpd 6\n");
  5707. hpd6 |= DC_HPDx_INT_EN;
  5708. }
  5709. if (rdev->irq.dpm_thermal) {
  5710. DRM_DEBUG("dpm thermal\n");
  5711. if (rdev->flags & RADEON_IS_IGP)
  5712. thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
  5713. else
  5714. thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
  5715. }
  5716. WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
  5717. WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
  5718. WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
  5719. WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
  5720. WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
  5721. WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
  5722. WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
  5723. WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
  5724. WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
  5725. WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
  5726. WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
  5727. WREG32(GRBM_INT_CNTL, grbm_int_cntl);
  5728. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
  5729. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
  5730. if (rdev->num_crtc >= 4) {
  5731. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
  5732. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
  5733. }
  5734. if (rdev->num_crtc >= 6) {
  5735. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
  5736. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
  5737. }
  5738. WREG32(DC_HPD1_INT_CONTROL, hpd1);
  5739. WREG32(DC_HPD2_INT_CONTROL, hpd2);
  5740. WREG32(DC_HPD3_INT_CONTROL, hpd3);
  5741. WREG32(DC_HPD4_INT_CONTROL, hpd4);
  5742. WREG32(DC_HPD5_INT_CONTROL, hpd5);
  5743. WREG32(DC_HPD6_INT_CONTROL, hpd6);
  5744. if (rdev->flags & RADEON_IS_IGP)
  5745. WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
  5746. else
  5747. WREG32_SMC(CG_THERMAL_INT, thermal_int);
  5748. return 0;
  5749. }
  5750. /**
  5751. * cik_irq_ack - ack interrupt sources
  5752. *
  5753. * @rdev: radeon_device pointer
  5754. *
  5755. * Ack interrupt sources on the GPU (vblanks, hpd,
5756. * etc.) (CIK). Certain interrupt sources are sw
  5757. * generated and do not require an explicit ack.
  5758. */
  5759. static inline void cik_irq_ack(struct radeon_device *rdev)
  5760. {
  5761. u32 tmp;
  5762. rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
  5763. rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
  5764. rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
  5765. rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
  5766. rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
  5767. rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
  5768. rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
  5769. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
  5770. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
  5771. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
  5772. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
  5773. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
  5774. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
  5775. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
  5776. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
  5777. if (rdev->num_crtc >= 4) {
  5778. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
  5779. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
  5780. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
  5781. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
  5782. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
  5783. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
  5784. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
  5785. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
  5786. }
  5787. if (rdev->num_crtc >= 6) {
  5788. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
  5789. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
  5790. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
  5791. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
  5792. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
  5793. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
  5794. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
  5795. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
  5796. }
  5797. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
  5798. tmp = RREG32(DC_HPD1_INT_CONTROL);
  5799. tmp |= DC_HPDx_INT_ACK;
  5800. WREG32(DC_HPD1_INT_CONTROL, tmp);
  5801. }
  5802. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
  5803. tmp = RREG32(DC_HPD2_INT_CONTROL);
  5804. tmp |= DC_HPDx_INT_ACK;
  5805. WREG32(DC_HPD2_INT_CONTROL, tmp);
  5806. }
  5807. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
  5808. tmp = RREG32(DC_HPD3_INT_CONTROL);
  5809. tmp |= DC_HPDx_INT_ACK;
  5810. WREG32(DC_HPD3_INT_CONTROL, tmp);
  5811. }
  5812. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
  5813. tmp = RREG32(DC_HPD4_INT_CONTROL);
  5814. tmp |= DC_HPDx_INT_ACK;
  5815. WREG32(DC_HPD4_INT_CONTROL, tmp);
  5816. }
  5817. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
  5818. tmp = RREG32(DC_HPD5_INT_CONTROL);
  5819. tmp |= DC_HPDx_INT_ACK;
  5820. WREG32(DC_HPD5_INT_CONTROL, tmp);
  5821. }
  5822. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5823. tmp = RREG32(DC_HPD6_INT_CONTROL);
  5824. tmp |= DC_HPDx_INT_ACK;
  5825. WREG32(DC_HPD6_INT_CONTROL, tmp);
  5826. }
  5827. }
  5828. /**
  5829. * cik_irq_disable - disable interrupts
  5830. *
  5831. * @rdev: radeon_device pointer
  5832. *
  5833. * Disable interrupts on the hw (CIK).
  5834. */
  5835. static void cik_irq_disable(struct radeon_device *rdev)
  5836. {
  5837. cik_disable_interrupts(rdev);
  5838. /* Wait and acknowledge irq */
  5839. mdelay(1);
  5840. cik_irq_ack(rdev);
  5841. cik_disable_interrupt_state(rdev);
  5842. }
  5843. /**
5844. * cik_irq_suspend - disable interrupts for suspend
  5845. *
  5846. * @rdev: radeon_device pointer
  5847. *
  5848. * Disable interrupts and stop the RLC (CIK).
  5849. * Used for suspend.
  5850. */
  5851. static void cik_irq_suspend(struct radeon_device *rdev)
  5852. {
  5853. cik_irq_disable(rdev);
  5854. cik_rlc_stop(rdev);
  5855. }
  5856. /**
  5857. * cik_irq_fini - tear down interrupt support
  5858. *
  5859. * @rdev: radeon_device pointer
  5860. *
  5861. * Disable interrupts on the hw and free the IH ring
  5862. * buffer (CIK).
  5863. * Used for driver unload.
  5864. */
  5865. static void cik_irq_fini(struct radeon_device *rdev)
  5866. {
  5867. cik_irq_suspend(rdev);
  5868. r600_ih_ring_fini(rdev);
  5869. }
  5870. /**
  5871. * cik_get_ih_wptr - get the IH ring buffer wptr
  5872. *
  5873. * @rdev: radeon_device pointer
  5874. *
  5875. * Get the IH ring buffer wptr from either the register
  5876. * or the writeback memory buffer (CIK). Also check for
  5877. * ring buffer overflow and deal with it.
  5878. * Used by cik_irq_process().
  5879. * Returns the value of the wptr.
  5880. */
  5881. static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
  5882. {
  5883. u32 wptr, tmp;
  5884. if (rdev->wb.enabled)
  5885. wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
  5886. else
  5887. wptr = RREG32(IH_RB_WPTR);
  5888. if (wptr & RB_OVERFLOW) {
5889. /* When a ring buffer overflow happens, start parsing interrupts
5890. * from the last vector that was not overwritten (wptr + 16).
5891. * Hopefully this allows us to catch up.
  5892. */
  5893. dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
5894. wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
  5895. rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
  5896. tmp = RREG32(IH_RB_CNTL);
  5897. tmp |= IH_WPTR_OVERFLOW_CLEAR;
  5898. WREG32(IH_RB_CNTL, tmp);
  5899. }
  5900. return (wptr & rdev->ih.ptr_mask);
  5901. }
  5902. /* CIK IV Ring
  5903. * Each IV ring entry is 128 bits:
  5904. * [7:0] - interrupt source id
  5905. * [31:8] - reserved
  5906. * [59:32] - interrupt source data
  5907. * [63:60] - reserved
  5908. * [71:64] - RINGID
  5909. * CP:
  5910. * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
  5911. * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
  5912. * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
  5913. * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
  5914. * PIPE_ID - ME0 0=3D
  5915. * - ME1&2 compute dispatcher (4 pipes each)
  5916. * SDMA:
  5917. * INSTANCE_ID [1:0], QUEUE_ID[1:0]
  5918. * INSTANCE_ID - 0 = sdma0, 1 = sdma1
  5919. * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
  5920. * [79:72] - VMID
  5921. * [95:80] - PASID
  5922. * [127:96] - reserved
  5923. */
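/* Field extraction per the layout above, as done in cik_irq_process()
 * below (dw[] are the four little-endian dwords of one 16-byte vector;
 * vm_id/pasid are shown for completeness but not used by the handler):
 *
 *	src_id   = le32_to_cpu(dw[0]) & 0xff;		// [7:0]
 *	src_data = le32_to_cpu(dw[1]) & 0xfffffff;	// [59:32]
 *	ring_id  = le32_to_cpu(dw[2]) & 0xff;		// [71:64]
 *	vm_id    = (le32_to_cpu(dw[2]) >> 8) & 0xff;	// [79:72]
 *	pasid    = (le32_to_cpu(dw[2]) >> 16) & 0xffff;	// [95:80]
 *
 * For CP sources, RINGID packs ME_ID[1:0] | PIPE_ID[1:0] | QUEUE_ID[2:0]:
 *
 *	me_id    = (ring_id & 0x60) >> 5;
 *	pipe_id  = (ring_id & 0x18) >> 3;
 *	queue_id =  ring_id & 0x7;
 */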
  5924. /**
  5925. * cik_irq_process - interrupt handler
  5926. *
  5927. * @rdev: radeon_device pointer
  5928. *
5929. * Interrupt handler (CIK). Walk the IH ring,
  5930. * ack interrupts and schedule work to handle
  5931. * interrupt events.
  5932. * Returns irq process return code.
  5933. */
  5934. int cik_irq_process(struct radeon_device *rdev)
  5935. {
  5936. struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  5937. struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  5938. u32 wptr;
  5939. u32 rptr;
  5940. u32 src_id, src_data, ring_id;
  5941. u8 me_id, pipe_id, queue_id;
  5942. u32 ring_index;
  5943. bool queue_hotplug = false;
  5944. bool queue_reset = false;
  5945. u32 addr, status, mc_client;
  5946. bool queue_thermal = false;
  5947. if (!rdev->ih.enabled || rdev->shutdown)
  5948. return IRQ_NONE;
  5949. wptr = cik_get_ih_wptr(rdev);
  5950. restart_ih:
  5951. /* is somebody else already processing irqs? */
  5952. if (atomic_xchg(&rdev->ih.lock, 1))
  5953. return IRQ_NONE;
  5954. rptr = rdev->ih.rptr;
  5955. DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
  5956. /* Order reading of wptr vs. reading of IH ring data */
  5957. rmb();
  5958. /* display interrupts */
  5959. cik_irq_ack(rdev);
  5960. while (rptr != wptr) {
  5961. /* wptr/rptr are in bytes! */
  5962. ring_index = rptr / 4;
  5963. src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
  5964. src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
  5965. ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
  5966. switch (src_id) {
  5967. case 1: /* D1 vblank/vline */
  5968. switch (src_data) {
  5969. case 0: /* D1 vblank */
  5970. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
  5971. if (rdev->irq.crtc_vblank_int[0]) {
  5972. drm_handle_vblank(rdev->ddev, 0);
  5973. rdev->pm.vblank_sync = true;
  5974. wake_up(&rdev->irq.vblank_queue);
  5975. }
  5976. if (atomic_read(&rdev->irq.pflip[0]))
  5977. radeon_crtc_handle_flip(rdev, 0);
  5978. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
  5979. DRM_DEBUG("IH: D1 vblank\n");
  5980. }
  5981. break;
  5982. case 1: /* D1 vline */
  5983. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
  5984. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
  5985. DRM_DEBUG("IH: D1 vline\n");
  5986. }
  5987. break;
  5988. default:
  5989. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  5990. break;
  5991. }
  5992. break;
  5993. case 2: /* D2 vblank/vline */
  5994. switch (src_data) {
  5995. case 0: /* D2 vblank */
  5996. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
  5997. if (rdev->irq.crtc_vblank_int[1]) {
  5998. drm_handle_vblank(rdev->ddev, 1);
  5999. rdev->pm.vblank_sync = true;
  6000. wake_up(&rdev->irq.vblank_queue);
  6001. }
  6002. if (atomic_read(&rdev->irq.pflip[1]))
  6003. radeon_crtc_handle_flip(rdev, 1);
  6004. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
  6005. DRM_DEBUG("IH: D2 vblank\n");
  6006. }
  6007. break;
  6008. case 1: /* D2 vline */
  6009. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
  6010. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
  6011. DRM_DEBUG("IH: D2 vline\n");
  6012. }
  6013. break;
  6014. default:
  6015. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6016. break;
  6017. }
  6018. break;
  6019. case 3: /* D3 vblank/vline */
  6020. switch (src_data) {
  6021. case 0: /* D3 vblank */
  6022. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
  6023. if (rdev->irq.crtc_vblank_int[2]) {
  6024. drm_handle_vblank(rdev->ddev, 2);
  6025. rdev->pm.vblank_sync = true;
  6026. wake_up(&rdev->irq.vblank_queue);
  6027. }
  6028. if (atomic_read(&rdev->irq.pflip[2]))
  6029. radeon_crtc_handle_flip(rdev, 2);
  6030. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
  6031. DRM_DEBUG("IH: D3 vblank\n");
  6032. }
  6033. break;
  6034. case 1: /* D3 vline */
  6035. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
  6036. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
  6037. DRM_DEBUG("IH: D3 vline\n");
  6038. }
  6039. break;
  6040. default:
  6041. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6042. break;
  6043. }
  6044. break;
  6045. case 4: /* D4 vblank/vline */
  6046. switch (src_data) {
  6047. case 0: /* D4 vblank */
  6048. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
  6049. if (rdev->irq.crtc_vblank_int[3]) {
  6050. drm_handle_vblank(rdev->ddev, 3);
  6051. rdev->pm.vblank_sync = true;
  6052. wake_up(&rdev->irq.vblank_queue);
  6053. }
  6054. if (atomic_read(&rdev->irq.pflip[3]))
  6055. radeon_crtc_handle_flip(rdev, 3);
  6056. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
  6057. DRM_DEBUG("IH: D4 vblank\n");
  6058. }
  6059. break;
  6060. case 1: /* D4 vline */
  6061. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
  6062. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
  6063. DRM_DEBUG("IH: D4 vline\n");
  6064. }
  6065. break;
  6066. default:
  6067. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6068. break;
  6069. }
  6070. break;
  6071. case 5: /* D5 vblank/vline */
  6072. switch (src_data) {
  6073. case 0: /* D5 vblank */
  6074. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
  6075. if (rdev->irq.crtc_vblank_int[4]) {
  6076. drm_handle_vblank(rdev->ddev, 4);
  6077. rdev->pm.vblank_sync = true;
  6078. wake_up(&rdev->irq.vblank_queue);
  6079. }
  6080. if (atomic_read(&rdev->irq.pflip[4]))
  6081. radeon_crtc_handle_flip(rdev, 4);
  6082. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
  6083. DRM_DEBUG("IH: D5 vblank\n");
  6084. }
  6085. break;
  6086. case 1: /* D5 vline */
  6087. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
  6088. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
  6089. DRM_DEBUG("IH: D5 vline\n");
  6090. }
  6091. break;
  6092. default:
  6093. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6094. break;
  6095. }
  6096. break;
  6097. case 6: /* D6 vblank/vline */
  6098. switch (src_data) {
  6099. case 0: /* D6 vblank */
  6100. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
  6101. if (rdev->irq.crtc_vblank_int[5]) {
  6102. drm_handle_vblank(rdev->ddev, 5);
  6103. rdev->pm.vblank_sync = true;
  6104. wake_up(&rdev->irq.vblank_queue);
  6105. }
  6106. if (atomic_read(&rdev->irq.pflip[5]))
  6107. radeon_crtc_handle_flip(rdev, 5);
  6108. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
  6109. DRM_DEBUG("IH: D6 vblank\n");
  6110. }
  6111. break;
  6112. case 1: /* D6 vline */
  6113. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
  6114. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
  6115. DRM_DEBUG("IH: D6 vline\n");
  6116. }
  6117. break;
  6118. default:
  6119. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6120. break;
  6121. }
  6122. break;
  6123. case 42: /* HPD hotplug */
  6124. switch (src_data) {
  6125. case 0:
  6126. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
  6127. rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
  6128. queue_hotplug = true;
  6129. DRM_DEBUG("IH: HPD1\n");
  6130. }
  6131. break;
  6132. case 1:
  6133. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
  6134. rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
  6135. queue_hotplug = true;
  6136. DRM_DEBUG("IH: HPD2\n");
  6137. }
  6138. break;
  6139. case 2:
  6140. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
  6141. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
  6142. queue_hotplug = true;
  6143. DRM_DEBUG("IH: HPD3\n");
  6144. }
  6145. break;
  6146. case 3:
  6147. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
  6148. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
  6149. queue_hotplug = true;
  6150. DRM_DEBUG("IH: HPD4\n");
  6151. }
  6152. break;
  6153. case 4:
  6154. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
  6155. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
  6156. queue_hotplug = true;
  6157. DRM_DEBUG("IH: HPD5\n");
  6158. }
  6159. break;
  6160. case 5:
  6161. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
  6162. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
  6163. queue_hotplug = true;
  6164. DRM_DEBUG("IH: HPD6\n");
  6165. }
  6166. break;
  6167. default:
  6168. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6169. break;
  6170. }
  6171. break;
  6172. case 124: /* UVD */
  6173. DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
  6174. radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
  6175. break;
  6176. case 146:
  6177. case 147:
  6178. addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
  6179. status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
  6180. mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
  6181. dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
  6182. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  6183. addr);
  6184. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  6185. status);
  6186. cik_vm_decode_fault(rdev, status, addr, mc_client);
  6187. /* reset addr and status */
  6188. WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
  6189. break;
  6190. case 176: /* GFX RB CP_INT */
  6191. case 177: /* GFX IB CP_INT */
  6192. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6193. break;
  6194. case 181: /* CP EOP event */
  6195. DRM_DEBUG("IH: CP EOP\n");
  6196. /* XXX check the bitfield order! */
  6197. me_id = (ring_id & 0x60) >> 5;
  6198. pipe_id = (ring_id & 0x18) >> 3;
  6199. queue_id = (ring_id & 0x7) >> 0;
  6200. switch (me_id) {
  6201. case 0:
  6202. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6203. break;
  6204. case 1:
  6205. case 2:
6206. if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
  6207. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6208. if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
  6209. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  6210. break;
  6211. }
  6212. break;
  6213. case 184: /* CP Privileged reg access */
  6214. DRM_ERROR("Illegal register access in command stream\n");
  6215. /* XXX check the bitfield order! */
  6216. me_id = (ring_id & 0x60) >> 5;
  6217. pipe_id = (ring_id & 0x18) >> 3;
  6218. queue_id = (ring_id & 0x7) >> 0;
  6219. switch (me_id) {
  6220. case 0:
  6221. /* This results in a full GPU reset, but all we need to do is soft
  6222. * reset the CP for gfx
  6223. */
  6224. queue_reset = true;
  6225. break;
  6226. case 1:
  6227. /* XXX compute */
  6228. queue_reset = true;
  6229. break;
  6230. case 2:
  6231. /* XXX compute */
  6232. queue_reset = true;
  6233. break;
  6234. }
  6235. break;
  6236. case 185: /* CP Privileged inst */
  6237. DRM_ERROR("Illegal instruction in command stream\n");
  6238. /* XXX check the bitfield order! */
  6239. me_id = (ring_id & 0x60) >> 5;
  6240. pipe_id = (ring_id & 0x18) >> 3;
  6241. queue_id = (ring_id & 0x7) >> 0;
  6242. switch (me_id) {
  6243. case 0:
  6244. /* This results in a full GPU reset, but all we need to do is soft
  6245. * reset the CP for gfx
  6246. */
  6247. queue_reset = true;
  6248. break;
  6249. case 1:
  6250. /* XXX compute */
  6251. queue_reset = true;
  6252. break;
  6253. case 2:
  6254. /* XXX compute */
  6255. queue_reset = true;
  6256. break;
  6257. }
  6258. break;
  6259. case 224: /* SDMA trap event */
  6260. /* XXX check the bitfield order! */
  6261. me_id = (ring_id & 0x3) >> 0;
  6262. queue_id = (ring_id & 0xc) >> 2;
  6263. DRM_DEBUG("IH: SDMA trap\n");
  6264. switch (me_id) {
  6265. case 0:
  6266. switch (queue_id) {
  6267. case 0:
  6268. radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
  6269. break;
  6270. case 1:
  6271. /* XXX compute */
  6272. break;
  6273. case 2:
  6274. /* XXX compute */
  6275. break;
  6276. }
  6277. break;
  6278. case 1:
  6279. switch (queue_id) {
  6280. case 0:
  6281. radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  6282. break;
  6283. case 1:
  6284. /* XXX compute */
  6285. break;
  6286. case 2:
  6287. /* XXX compute */
  6288. break;
  6289. }
  6290. break;
  6291. }
  6292. break;
  6293. case 230: /* thermal low to high */
  6294. DRM_DEBUG("IH: thermal low to high\n");
  6295. rdev->pm.dpm.thermal.high_to_low = false;
  6296. queue_thermal = true;
  6297. break;
  6298. case 231: /* thermal high to low */
  6299. DRM_DEBUG("IH: thermal high to low\n");
  6300. rdev->pm.dpm.thermal.high_to_low = true;
  6301. queue_thermal = true;
  6302. break;
  6303. case 233: /* GUI IDLE */
  6304. DRM_DEBUG("IH: GUI idle\n");
  6305. break;
  6306. case 241: /* SDMA Privileged inst */
  6307. case 247: /* SDMA Privileged inst */
  6308. DRM_ERROR("Illegal instruction in SDMA command stream\n");
  6309. /* XXX check the bitfield order! */
  6310. me_id = (ring_id & 0x3) >> 0;
  6311. queue_id = (ring_id & 0xc) >> 2;
  6312. switch (me_id) {
  6313. case 0:
  6314. switch (queue_id) {
  6315. case 0:
  6316. queue_reset = true;
  6317. break;
  6318. case 1:
  6319. /* XXX compute */
  6320. queue_reset = true;
  6321. break;
  6322. case 2:
  6323. /* XXX compute */
  6324. queue_reset = true;
  6325. break;
  6326. }
  6327. break;
  6328. case 1:
  6329. switch (queue_id) {
  6330. case 0:
  6331. queue_reset = true;
  6332. break;
  6333. case 1:
  6334. /* XXX compute */
  6335. queue_reset = true;
  6336. break;
  6337. case 2:
  6338. /* XXX compute */
  6339. queue_reset = true;
  6340. break;
  6341. }
  6342. break;
  6343. }
  6344. break;
  6345. default:
  6346. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6347. break;
  6348. }
  6349. /* wptr/rptr are in bytes! */
  6350. rptr += 16;
  6351. rptr &= rdev->ih.ptr_mask;
  6352. }
  6353. if (queue_hotplug)
  6354. schedule_work(&rdev->hotplug_work);
  6355. if (queue_reset)
  6356. schedule_work(&rdev->reset_work);
  6357. if (queue_thermal)
  6358. schedule_work(&rdev->pm.dpm.thermal.work);
  6359. rdev->ih.rptr = rptr;
  6360. WREG32(IH_RB_RPTR, rdev->ih.rptr);
  6361. atomic_set(&rdev->ih.lock, 0);
  6362. /* make sure wptr hasn't changed while processing */
  6363. wptr = cik_get_ih_wptr(rdev);
  6364. if (wptr != rptr)
  6365. goto restart_ih;
  6366. return IRQ_HANDLED;
  6367. }
  6368. /*
  6369. * startup/shutdown callbacks
  6370. */
  6371. /**
  6372. * cik_startup - program the asic to a functional state
  6373. *
  6374. * @rdev: radeon_device pointer
  6375. *
  6376. * Programs the asic to a functional state (CIK).
  6377. * Called by cik_init() and cik_resume().
  6378. * Returns 0 for success, error for failure.
  6379. */
  6380. static int cik_startup(struct radeon_device *rdev)
  6381. {
  6382. struct radeon_ring *ring;
  6383. int r;
  6384. /* enable pcie gen2/3 link */
  6385. cik_pcie_gen3_enable(rdev);
  6386. /* enable aspm */
  6387. cik_program_aspm(rdev);
  6388. /* scratch needs to be initialized before MC */
  6389. r = r600_vram_scratch_init(rdev);
  6390. if (r)
  6391. return r;
  6392. cik_mc_program(rdev);
  6393. if (rdev->flags & RADEON_IS_IGP) {
  6394. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  6395. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
  6396. r = cik_init_microcode(rdev);
  6397. if (r) {
  6398. DRM_ERROR("Failed to load firmware!\n");
  6399. return r;
  6400. }
  6401. }
  6402. } else {
  6403. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  6404. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
  6405. !rdev->mc_fw) {
  6406. r = cik_init_microcode(rdev);
  6407. if (r) {
  6408. DRM_ERROR("Failed to load firmware!\n");
  6409. return r;
  6410. }
  6411. }
  6412. r = ci_mc_load_microcode(rdev);
  6413. if (r) {
  6414. DRM_ERROR("Failed to load MC firmware!\n");
  6415. return r;
  6416. }
  6417. }
  6418. r = cik_pcie_gart_enable(rdev);
  6419. if (r)
  6420. return r;
  6421. cik_gpu_init(rdev);
  6422. /* allocate rlc buffers */
  6423. if (rdev->flags & RADEON_IS_IGP) {
  6424. if (rdev->family == CHIP_KAVERI) {
  6425. rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
  6426. rdev->rlc.reg_list_size =
  6427. (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
  6428. } else {
  6429. rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
  6430. rdev->rlc.reg_list_size =
  6431. (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
  6432. }
  6433. }
  6434. rdev->rlc.cs_data = ci_cs_data;
  6435. rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
  6436. r = sumo_rlc_init(rdev);
  6437. if (r) {
  6438. DRM_ERROR("Failed to init rlc BOs!\n");
  6439. return r;
  6440. }
  6441. /* allocate wb buffer */
  6442. r = radeon_wb_init(rdev);
  6443. if (r)
  6444. return r;
  6445. /* allocate mec buffers */
  6446. r = cik_mec_init(rdev);
  6447. if (r) {
  6448. DRM_ERROR("Failed to init MEC BOs!\n");
  6449. return r;
  6450. }
  6451. r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6452. if (r) {
  6453. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6454. return r;
  6455. }
  6456. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
  6457. if (r) {
  6458. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6459. return r;
  6460. }
  6461. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  6462. if (r) {
  6463. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6464. return r;
  6465. }
  6466. r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
  6467. if (r) {
  6468. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  6469. return r;
  6470. }
  6471. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  6472. if (r) {
  6473. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  6474. return r;
  6475. }
  6476. r = radeon_uvd_resume(rdev);
  6477. if (!r) {
  6478. r = uvd_v4_2_resume(rdev);
  6479. if (!r) {
  6480. r = radeon_fence_driver_start_ring(rdev,
  6481. R600_RING_TYPE_UVD_INDEX);
  6482. if (r)
  6483. dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
  6484. }
  6485. }
  6486. if (r)
  6487. rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
  6488. /* Enable IRQ */
  6489. if (!rdev->irq.installed) {
  6490. r = radeon_irq_kms_init(rdev);
  6491. if (r)
  6492. return r;
  6493. }
  6494. r = cik_irq_init(rdev);
  6495. if (r) {
  6496. DRM_ERROR("radeon: IH init failed (%d).\n", r);
  6497. radeon_irq_kms_fini(rdev);
  6498. return r;
  6499. }
  6500. cik_irq_set(rdev);
  6501. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  6502. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
  6503. CP_RB0_RPTR, CP_RB0_WPTR,
  6504. RADEON_CP_PACKET2);
  6505. if (r)
  6506. return r;
  6507. /* set up the compute queues */
  6508. /* type-2 packets are deprecated on MEC, use type-3 instead */
  6509. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  6510. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
  6511. CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
  6512. PACKET3(PACKET3_NOP, 0x3FFF));
  6513. if (r)
  6514. return r;
  6515. ring->me = 1; /* first MEC */
  6516. ring->pipe = 0; /* first pipe */
  6517. ring->queue = 0; /* first queue */
  6518. ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
  6519. /* type-2 packets are deprecated on MEC, use type-3 instead */
  6520. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  6521. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
  6522. CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
  6523. PACKET3(PACKET3_NOP, 0x3FFF));
  6524. if (r)
  6525. return r;
6526. /* dGPUs only have 1 MEC */
  6527. ring->me = 1; /* first MEC */
  6528. ring->pipe = 0; /* first pipe */
  6529. ring->queue = 1; /* second queue */
  6530. ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
  6531. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  6532. r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
  6533. SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
  6534. SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
  6535. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  6536. if (r)
  6537. return r;
  6538. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  6539. r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
  6540. SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
  6541. SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
  6542. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  6543. if (r)
  6544. return r;
  6545. r = cik_cp_resume(rdev);
  6546. if (r)
  6547. return r;
  6548. r = cik_sdma_resume(rdev);
  6549. if (r)
  6550. return r;
  6551. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  6552. if (ring->ring_size) {
  6553. r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
  6554. UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
  6555. RADEON_CP_PACKET2);
  6556. if (!r)
  6557. r = uvd_v1_0_init(rdev);
  6558. if (r)
  6559. DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
  6560. }
  6561. r = radeon_ib_pool_init(rdev);
  6562. if (r) {
  6563. dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
  6564. return r;
  6565. }
  6566. r = radeon_vm_manager_init(rdev);
  6567. if (r) {
  6568. dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
  6569. return r;
  6570. }
  6571. r = dce6_audio_init(rdev);
  6572. if (r)
  6573. return r;
  6574. return 0;
  6575. }
  6576. /**
  6577. * cik_resume - resume the asic to a functional state
  6578. *
  6579. * @rdev: radeon_device pointer
  6580. *
  6581. * Programs the asic to a functional state (CIK).
  6582. * Called at resume.
  6583. * Returns 0 for success, error for failure.
  6584. */
  6585. int cik_resume(struct radeon_device *rdev)
  6586. {
  6587. int r;
  6588. /* post card */
  6589. atom_asic_init(rdev->mode_info.atom_context);
  6590. /* init golden registers */
  6591. cik_init_golden_registers(rdev);
  6592. rdev->accel_working = true;
  6593. r = cik_startup(rdev);
  6594. if (r) {
  6595. DRM_ERROR("cik startup failed on resume\n");
  6596. rdev->accel_working = false;
  6597. return r;
  6598. }
  6599. return r;
  6600. }
  6601. /**
  6602. * cik_suspend - suspend the asic
  6603. *
  6604. * @rdev: radeon_device pointer
  6605. *
  6606. * Bring the chip into a state suitable for suspend (CIK).
  6607. * Called at suspend.
  6608. * Returns 0 for success.
  6609. */
  6610. int cik_suspend(struct radeon_device *rdev)
  6611. {
  6612. dce6_audio_fini(rdev);
  6613. radeon_vm_manager_fini(rdev);
  6614. cik_cp_enable(rdev, false);
  6615. cik_sdma_enable(rdev, false);
  6616. uvd_v1_0_fini(rdev);
  6617. radeon_uvd_suspend(rdev);
  6618. cik_fini_pg(rdev);
  6619. cik_fini_cg(rdev);
  6620. cik_irq_suspend(rdev);
  6621. radeon_wb_disable(rdev);
  6622. cik_pcie_gart_disable(rdev);
  6623. return 0;
  6624. }
6625. /* The plan is to move initialization into this function and use
6626. * helper functions so that radeon_device_init does pretty much
6627. * nothing more than call asic-specific functions. This
6628. * should also allow us to remove a bunch of callback functions
6629. * like vram_info.
6630. */
  6631. /**
  6632. * cik_init - asic specific driver and hw init
  6633. *
  6634. * @rdev: radeon_device pointer
  6635. *
  6636. * Setup asic specific driver variables and program the hw
  6637. * to a functional state (CIK).
  6638. * Called at driver startup.
  6639. * Returns 0 for success, errors for failure.
  6640. */
  6641. int cik_init(struct radeon_device *rdev)
  6642. {
  6643. struct radeon_ring *ring;
  6644. int r;
  6645. /* Read BIOS */
  6646. if (!radeon_get_bios(rdev)) {
  6647. if (ASIC_IS_AVIVO(rdev))
  6648. return -EINVAL;
  6649. }
  6650. /* Must be an ATOMBIOS */
  6651. if (!rdev->is_atom_bios) {
6652. dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
  6653. return -EINVAL;
  6654. }
  6655. r = radeon_atombios_init(rdev);
  6656. if (r)
  6657. return r;
  6658. /* Post card if necessary */
  6659. if (!radeon_card_posted(rdev)) {
  6660. if (!rdev->bios) {
  6661. dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
  6662. return -EINVAL;
  6663. }
  6664. DRM_INFO("GPU not posted. posting now...\n");
  6665. atom_asic_init(rdev->mode_info.atom_context);
  6666. }
  6667. /* init golden registers */
  6668. cik_init_golden_registers(rdev);
  6669. /* Initialize scratch registers */
  6670. cik_scratch_init(rdev);
  6671. /* Initialize surface registers */
  6672. radeon_surface_init(rdev);
  6673. /* Initialize clocks */
  6674. radeon_get_clock_info(rdev->ddev);
  6675. /* Fence driver */
  6676. r = radeon_fence_driver_init(rdev);
  6677. if (r)
  6678. return r;
  6679. /* initialize memory controller */
  6680. r = cik_mc_init(rdev);
  6681. if (r)
  6682. return r;
  6683. /* Memory manager */
  6684. r = radeon_bo_init(rdev);
  6685. if (r)
  6686. return r;
  6687. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  6688. ring->ring_obj = NULL;
  6689. r600_ring_init(rdev, ring, 1024 * 1024);
  6690. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  6691. ring->ring_obj = NULL;
  6692. r600_ring_init(rdev, ring, 1024 * 1024);
  6693. r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
  6694. if (r)
  6695. return r;
  6696. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  6697. ring->ring_obj = NULL;
  6698. r600_ring_init(rdev, ring, 1024 * 1024);
  6699. r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
  6700. if (r)
  6701. return r;
  6702. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  6703. ring->ring_obj = NULL;
  6704. r600_ring_init(rdev, ring, 256 * 1024);
  6705. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  6706. ring->ring_obj = NULL;
  6707. r600_ring_init(rdev, ring, 256 * 1024);
  6708. r = radeon_uvd_init(rdev);
  6709. if (!r) {
  6710. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  6711. ring->ring_obj = NULL;
  6712. r600_ring_init(rdev, ring, 4096);
  6713. }
  6714. rdev->ih.ring_obj = NULL;
  6715. r600_ih_ring_init(rdev, 64 * 1024);
  6716. r = r600_pcie_gart_init(rdev);
  6717. if (r)
  6718. return r;
  6719. rdev->accel_working = true;
  6720. r = cik_startup(rdev);
  6721. if (r) {
  6722. dev_err(rdev->dev, "disabling GPU acceleration\n");
  6723. cik_cp_fini(rdev);
  6724. cik_sdma_fini(rdev);
  6725. cik_irq_fini(rdev);
  6726. sumo_rlc_fini(rdev);
  6727. cik_mec_fini(rdev);
  6728. radeon_wb_fini(rdev);
  6729. radeon_ib_pool_fini(rdev);
  6730. radeon_vm_manager_fini(rdev);
  6731. radeon_irq_kms_fini(rdev);
  6732. cik_pcie_gart_fini(rdev);
  6733. rdev->accel_working = false;
  6734. }
  6735. /* Don't start up if the MC ucode is missing.
  6736. * The default clocks and voltages before the MC ucode
6737. * is loaded are not sufficient for advanced operations.
  6738. */
  6739. if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
6740. DRM_ERROR("radeon: MC ucode required for CIK.\n");
  6741. return -EINVAL;
  6742. }
  6743. return 0;
  6744. }
  6745. /**
  6746. * cik_fini - asic specific driver and hw fini
  6747. *
  6748. * @rdev: radeon_device pointer
  6749. *
  6750. * Tear down the asic specific driver variables and program the hw
  6751. * to an idle state (CIK).
  6752. * Called at driver unload.
  6753. */
  6754. void cik_fini(struct radeon_device *rdev)
  6755. {
  6756. cik_cp_fini(rdev);
  6757. cik_sdma_fini(rdev);
  6758. cik_fini_pg(rdev);
  6759. cik_fini_cg(rdev);
  6760. cik_irq_fini(rdev);
  6761. sumo_rlc_fini(rdev);
  6762. cik_mec_fini(rdev);
  6763. radeon_wb_fini(rdev);
  6764. radeon_vm_manager_fini(rdev);
  6765. radeon_ib_pool_fini(rdev);
  6766. radeon_irq_kms_fini(rdev);
  6767. uvd_v1_0_fini(rdev);
  6768. radeon_uvd_fini(rdev);
  6769. cik_pcie_gart_fini(rdev);
  6770. r600_vram_scratch_fini(rdev);
  6771. radeon_gem_fini(rdev);
  6772. radeon_fence_driver_fini(rdev);
  6773. radeon_bo_fini(rdev);
  6774. radeon_atombios_fini(rdev);
  6775. kfree(rdev->bios);
  6776. rdev->bios = NULL;
  6777. }
  6778. /* display watermark setup */
  6779. /**
  6780. * dce8_line_buffer_adjust - Set up the line buffer
  6781. *
  6782. * @rdev: radeon_device pointer
  6783. * @radeon_crtc: the selected display controller
  6784. * @mode: the current display mode on the selected display
  6785. * controller
  6786. *
6787. * Set up the line buffer allocation for
  6788. * the selected display controller (CIK).
  6789. * Returns the line buffer size in pixels.
  6790. */
  6791. static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
  6792. struct radeon_crtc *radeon_crtc,
  6793. struct drm_display_mode *mode)
  6794. {
  6795. u32 tmp, buffer_alloc, i;
  6796. u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
  6797. /*
  6798. * Line Buffer Setup
6799. * There are 6 line buffers, one for each display controller.
  6800. * There are 3 partitions per LB. Select the number of partitions
  6801. * to enable based on the display width. For display widths larger
6802. * than 4096, you need to use 2 display controllers and combine
  6803. * them using the stereo blender.
  6804. */
  6805. if (radeon_crtc->base.enabled && mode) {
  6806. if (mode->crtc_hdisplay < 1920) {
  6807. tmp = 1;
  6808. buffer_alloc = 2;
  6809. } else if (mode->crtc_hdisplay < 2560) {
  6810. tmp = 2;
  6811. buffer_alloc = 2;
  6812. } else if (mode->crtc_hdisplay < 4096) {
  6813. tmp = 0;
  6814. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  6815. } else {
  6816. DRM_DEBUG_KMS("Mode too big for LB!\n");
  6817. tmp = 0;
  6818. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  6819. }
  6820. } else {
  6821. tmp = 1;
  6822. buffer_alloc = 0;
  6823. }
  6824. WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
  6825. LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
  6826. WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
  6827. DMIF_BUFFERS_ALLOCATED(buffer_alloc));
  6828. for (i = 0; i < rdev->usec_timeout; i++) {
  6829. if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
  6830. DMIF_BUFFERS_ALLOCATED_COMPLETED)
  6831. break;
  6832. udelay(1);
  6833. }
  6834. if (radeon_crtc->base.enabled && mode) {
  6835. switch (tmp) {
  6836. case 0:
  6837. default:
  6838. return 4096 * 2;
  6839. case 1:
  6840. return 1920 * 2;
  6841. case 2:
  6842. return 2560 * 2;
  6843. }
  6844. }
  6845. /* controller not enabled, so no lb used */
  6846. return 0;
  6847. }
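/* Summary of the mapping implemented above:
 *	crtc_hdisplay < 1920 -> LB_MEMORY_CONFIG(1), lb size 1920 * 2
 *	crtc_hdisplay < 2560 -> LB_MEMORY_CONFIG(2), lb size 2560 * 2
 *	crtc_hdisplay < 4096 -> LB_MEMORY_CONFIG(0), lb size 4096 * 2
 * (sizes in pixels; config 0 is also the fallback for oversized modes)
 */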
  6848. /**
  6849. * cik_get_number_of_dram_channels - get the number of dram channels
  6850. *
  6851. * @rdev: radeon_device pointer
  6852. *
  6853. * Look up the number of video ram channels (CIK).
  6854. * Used for display watermark bandwidth calculations
  6855. * Returns the number of dram channels
  6856. */
  6857. static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
  6858. {
  6859. u32 tmp = RREG32(MC_SHARED_CHMAP);
  6860. switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
  6861. case 0:
  6862. default:
  6863. return 1;
  6864. case 1:
  6865. return 2;
  6866. case 2:
  6867. return 4;
  6868. case 3:
  6869. return 8;
  6870. case 4:
  6871. return 3;
  6872. case 5:
  6873. return 6;
  6874. case 6:
  6875. return 10;
  6876. case 7:
  6877. return 12;
  6878. case 8:
  6879. return 16;
  6880. }
  6881. }
  6882. struct dce8_wm_params {
  6883. u32 dram_channels; /* number of dram channels */
  6884. u32 yclk; /* bandwidth per dram data pin in kHz */
  6885. u32 sclk; /* engine clock in kHz */
  6886. u32 disp_clk; /* display clock in kHz */
  6887. u32 src_width; /* viewport width */
  6888. u32 active_time; /* active display time in ns */
  6889. u32 blank_time; /* blank time in ns */
  6890. bool interlaced; /* mode is interlaced */
  6891. fixed20_12 vsc; /* vertical scale ratio */
  6892. u32 num_heads; /* number of active crtcs */
  6893. u32 bytes_per_pixel; /* bytes per pixel display + overlay */
  6894. u32 lb_size; /* line buffer allocated to pipe */
  6895. u32 vtaps; /* vertical scaler taps */
  6896. };
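/* Illustrative only: how the helpers below consume this struct.
 * The example values are hypothetical, not taken from real hardware:
 *
 *	struct dce8_wm_params wm = {
 *		.dram_channels = 2,	// see cik_get_number_of_dram_channels()
 *		.yclk = 1000000,	// 1 GHz per pin, in kHz
 *		.sclk = 800000,		// 800 MHz engine clock, in kHz
 *		.disp_clk = 600000,	// 600 MHz display clock, in kHz
 *		.num_heads = 1,
 *	};
 *	u32 mbps = dce8_dram_bandwidth(&wm);	// raw DRAM bandwidth, MB/s
 */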
  6897. /**
  6898. * dce8_dram_bandwidth - get the dram bandwidth
  6899. *
  6900. * @wm: watermark calculation data
  6901. *
  6902. * Calculate the raw dram bandwidth (CIK).
  6903. * Used for display watermark bandwidth calculations
  6904. * Returns the dram bandwidth in MBytes/s
  6905. */
  6906. static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
  6907. {
  6908. /* Calculate raw DRAM Bandwidth */
  6909. fixed20_12 dram_efficiency; /* 0.7 */
  6910. fixed20_12 yclk, dram_channels, bandwidth;
  6911. fixed20_12 a;
  6912. a.full = dfixed_const(1000);
  6913. yclk.full = dfixed_const(wm->yclk);
  6914. yclk.full = dfixed_div(yclk, a);
  6915. dram_channels.full = dfixed_const(wm->dram_channels * 4);
  6916. a.full = dfixed_const(10);
  6917. dram_efficiency.full = dfixed_const(7);
  6918. dram_efficiency.full = dfixed_div(dram_efficiency, a);
  6919. bandwidth.full = dfixed_mul(dram_channels, yclk);
  6920. bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
  6921. return dfixed_trunc(bandwidth);
  6922. }
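/* In closed form, with yclk in kHz:
 *	raw_bw [MB/s] = (yclk / 1000) * (dram_channels * 4) * 0.7
 * e.g. 2 channels at yclk = 1000000 -> 1000 * 8 * 0.7 = 5600 MB/s.
 */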

/**
 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
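
/*
 * With the same illustrative numbers as in the previous example, the
 * worst-case 0.3 display allocation yields 8 * 4 * 1000 * 0.3 =
 * 9600 MBytes/s that the display controller can count on from DRAM.
 */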

/**
 * dce8_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
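
/*
 * Worked example (editor's addition, illustrative): at sclk = 800000 kHz
 * the data return path is modeled as 32 bytes per engine clock at 0.8
 * efficiency, i.e. 32 * 800 * 0.8 = 20480 MBytes/s.
 */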

/**
 * dce8_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce8_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce8_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
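
/*
 * Illustrative example (editor's addition): the DMIF request path uses
 * the same 32 bytes per clock * 0.8 model as the data return path, but
 * against disp_clk. Continuing the numbers above with disp_clk =
 * 600000 kHz, the three limits are 22400, 20480 and 32 * 600 * 0.8 =
 * 15360 MBytes/s, so the available bandwidth reported here would be
 * 15360 MBytes/s.
 */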

/**
 * dce8_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
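
/*
 * Worked example (editor's addition, illustrative, ignoring the integer
 * truncation of the pixel period): a 1080p head with src_width = 1920,
 * 4 bytes per pixel, vsc = 1 and line_time of about 13200 ns needs
 * roughly 1920 * 4 / 13.2 ~= 580 MBytes/s on average.
 */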

/**
 * dce8_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce8_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
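
/*
 * Rough worked example (editor's addition, illustrative numbers): with
 * 15360 MBytes/s available, one head and disp_clk = 148500 kHz, the
 * components above come to worst_chunk_return_time = 512 * 8 * 1000 /
 * 15360 ~= 266 ns, cursor_line_pair_return_time ~= 33 ns and dc_latency
 * ~= 269 ns, so latency ~= 2000 + (2 * 266 + 33) + 269 ~= 2834 ns.
 * The result is extended further only if the line buffer cannot be
 * refilled within the active display time.
 */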

/**
 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce8_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce8_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce8_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
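
/*
 * Illustrative example (editor's addition): with lb_size = 8192 (a
 * 4096 * 2 allocation), src_width = 1920 and vtaps = 1, lb_partitions =
 * 4 > vtaps + 1, so two lines of latency can be tolerated;
 * latency_hiding = 2 * line_time + blank_time, which at ~13200 ns per
 * line comfortably exceeds the ~2834 ns watermark from the example
 * above.
 */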

/**
 * dce8_program_watermarks - program display watermarks
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce8_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce8_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce8_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
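
/*
 * Summary note (editor's addition): watermark set A is programmed for
 * the high clocks and set B for the low clocks; the line_time, wm_high
 * and wm_low values saved above let the DPM code judge which
 * performance level is safe and flip the active set via
 * DPG_WATERMARK_MASK_CONTROL later on.
 */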

/**
 * dce8_bandwidth_update - program display watermarks
 *
 * @rdev: radeon_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
void dce8_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i++) {
		mode = &rdev->mode_info.crtcs[i]->base.mode;
		lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
		dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
	}
}

/**
 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	/* writing 1 latches the current counter value into the LSB/MSB
	 * registers; the mutex keeps the latch and the two 32-bit reads
	 * atomic with respect to other callers.
	 */
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}

static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(DCLK_DIR_CNTL_EN | DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;

	r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		return r;

	r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	return r;
}

static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2: restore the compliance-related bits
				 * (bit 4 and bits 9-11) saved before the
				 * equalization redo
				 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
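
/*
 * Note (editor's addition, PCIe spec background rather than driver
 * behavior): the target link speed written to PCI_EXP_LNKCTL2 above uses
 * the standard encoding of 1 = 2.5 GT/s (gen1), 2 = 5.0 GT/s (gen2) and
 * 3 = 8.0 GT/s (gen3); the final poll waits for the hardware to clear
 * LC_INITIATE_LINK_SPEED_CHANGE once retraining completes.
 */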

static void cik_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	/* XXX double check IGPs */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_P_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			if (!disable_clkreq) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32_SMC(THM_CLK_CNTL, data);

				orig = data = RREG32_SMC(MISC_CLK_CTRL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32_SMC(MISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);

	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}
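
/*
 * Closing note (editor's addition, illustrative): L0s and L1 are the
 * PCIe link power states. The code above arms their inactivity timers,
 * optionally powers down the PLLs while in L1, and only allows the
 * deeper clock gating when the upstream bridge advertises clock power
 * management (PCI_EXP_LNKCAP_CLKPM) for CLKREQ# support.
 */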