cik.c 232 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047
214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095
610561156125613561456155616561756185619562056215622562356245625562656275628562956305631563256335634563556365637563856395640564156425643564456455646564756485649565056515652565356545655565656575658565956605661566256635664566556665667566856695670567156725673567456755676567756785679568056815682568356845685568656875688568956905691569256935694569556965697569856995700570157025703570457055706570757085709571057115712571357145715571657175718571957205721572257235724572557265727572857295730573157325733573457355736573757385739574057415742574357445745574657475748574957505751575257535754575557565757575857595760576157625763576457655766576757685769577057715772577357745775577657775778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498
649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766777677867796780678167826783678467856786678767886789679067916792679367946795679667976798679968006801680268036804680568066807680868096810681168126813681468156816681768186819682068216822682368246825682668276828682968306831683268336834683568366837683868396840684168426843684468456846684768486849685068516852685368546855685668576858685968606861686268636864686568666867686868696870687168726873687468756876687768786879688068816882688368846885688668876888688968906891689268936894689568966897689868996900690169026903690469056906690769086909691069116912691369146915691669176918691969206921692269236924692569266927692869296930693169326933693469356936693769386939694069416942694369446945694669476948694969506951695269536954695569566957695869596960696169626963696469656966696769686969697069716972697369746975697669776978697969806981698269836984698569866987698869896990699169926993699469956996699769986999700070017002700370047005700670077008700970107011701270137014701570167017701870197020702170227023702470257026702770287029703070317032703370347035703670377038703970407041704270437044704570467047704870497050705170527053705470557056705770587059706070617062706370647065706670677068706970707071707270737074707570767077707870797080708170827083708470857086708770887089709070917092709370947095709670977098709971007101710271037104710571067107710871097110711171127113711471157116711771187119712071217122712371247125712671277128712971307131713271337134713571367137713871397140714171427143714471457146714771487149715071517152715371547155715671577158715971607161716271637164716571667167716871697170717171727173717471757176717771787179718071817182718371847185718671877188718971907191719271937194719571967197719871997200720172027203720472057206720772087209721072117212721372147215721672177218721972207221722272237224722572267227722872297230723172327233723472357236723772387239724072417242724372447245724672477248724972507251725272537254725572567257725872597260726172627263726472657266726772687269727072717272727372747275727672777278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738
773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217821882198220822182228223822482258226822782288229823082318232823382348235823682378238823982408241824282438244824582468247824882498250825182528253825482558256825782588259826082618262826382648265826682678268826982708271827282738274827582
76827782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381
  1. /*
  2. * Copyright 2012 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Alex Deucher
  23. */
  24. #include <linux/firmware.h>
  25. #include <linux/slab.h>
  26. #include <linux/module.h>
  27. #include "drmP.h"
  28. #include "radeon.h"
  29. #include "radeon_asic.h"
  30. #include "cikd.h"
  31. #include "atom.h"
  32. #include "cik_blit_shaders.h"
  33. #include "radeon_ucode.h"
  34. #include "clearstate_ci.h"
  35. MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
  36. MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
  37. MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
  38. MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
  39. MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
  40. MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
  41. MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
  42. MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
  43. MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
  44. MODULE_FIRMWARE("radeon/KAVERI_me.bin");
  45. MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
  46. MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
  47. MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
  48. MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
  49. MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
  50. MODULE_FIRMWARE("radeon/KABINI_me.bin");
  51. MODULE_FIRMWARE("radeon/KABINI_ce.bin");
  52. MODULE_FIRMWARE("radeon/KABINI_mec.bin");
  53. MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
  54. MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
  55. extern int r600_ih_ring_alloc(struct radeon_device *rdev);
  56. extern void r600_ih_ring_fini(struct radeon_device *rdev);
  57. extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
  58. extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
  59. extern bool evergreen_is_display_hung(struct radeon_device *rdev);
  60. extern void sumo_rlc_fini(struct radeon_device *rdev);
  61. extern int sumo_rlc_init(struct radeon_device *rdev);
  62. extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
  63. extern void si_rlc_reset(struct radeon_device *rdev);
  64. extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
  65. extern int cik_sdma_resume(struct radeon_device *rdev);
  66. extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
  67. extern void cik_sdma_fini(struct radeon_device *rdev);
  68. extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
  69. struct radeon_ib *ib,
  70. uint64_t pe,
  71. uint64_t addr, unsigned count,
  72. uint32_t incr, uint32_t flags);
  73. static void cik_rlc_stop(struct radeon_device *rdev);
  74. static void cik_pcie_gen3_enable(struct radeon_device *rdev);
  75. static void cik_program_aspm(struct radeon_device *rdev);
  76. static void cik_init_pg(struct radeon_device *rdev);
  77. static void cik_init_cg(struct radeon_device *rdev);
  78. static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
  79. bool enable);
  80. /* get temperature in millidegrees */
  81. int ci_get_temp(struct radeon_device *rdev)
  82. {
  83. u32 temp;
  84. int actual_temp = 0;
  85. temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
  86. CTF_TEMP_SHIFT;
  87. if (temp & 0x200)
  88. actual_temp = 255;
  89. else
  90. actual_temp = temp & 0x1ff;
  91. actual_temp = actual_temp * 1000;
  92. return actual_temp;
  93. }
  94. /* get temperature in millidegrees */
  95. int kv_get_temp(struct radeon_device *rdev)
  96. {
  97. u32 temp;
  98. int actual_temp = 0;
  99. temp = RREG32_SMC(0xC0300E0C);
  100. if (temp)
  101. actual_temp = (temp / 8) - 49;
  102. else
  103. actual_temp = 0;
  104. actual_temp = actual_temp * 1000;
  105. return actual_temp;
  106. }
  107. /*
  108. * Indirect registers accessor
  109. */
  110. u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
  111. {
  112. u32 r;
  113. WREG32(PCIE_INDEX, reg);
  114. (void)RREG32(PCIE_INDEX);
  115. r = RREG32(PCIE_DATA);
  116. return r;
  117. }
  118. void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  119. {
  120. WREG32(PCIE_INDEX, reg);
  121. (void)RREG32(PCIE_INDEX);
  122. WREG32(PCIE_DATA, v);
  123. (void)RREG32(PCIE_DATA);
  124. }
  125. static const u32 spectre_rlc_save_restore_register_list[] =
  126. {
  127. (0x0e00 << 16) | (0xc12c >> 2),
  128. 0x00000000,
  129. (0x0e00 << 16) | (0xc140 >> 2),
  130. 0x00000000,
  131. (0x0e00 << 16) | (0xc150 >> 2),
  132. 0x00000000,
  133. (0x0e00 << 16) | (0xc15c >> 2),
  134. 0x00000000,
  135. (0x0e00 << 16) | (0xc168 >> 2),
  136. 0x00000000,
  137. (0x0e00 << 16) | (0xc170 >> 2),
  138. 0x00000000,
  139. (0x0e00 << 16) | (0xc178 >> 2),
  140. 0x00000000,
  141. (0x0e00 << 16) | (0xc204 >> 2),
  142. 0x00000000,
  143. (0x0e00 << 16) | (0xc2b4 >> 2),
  144. 0x00000000,
  145. (0x0e00 << 16) | (0xc2b8 >> 2),
  146. 0x00000000,
  147. (0x0e00 << 16) | (0xc2bc >> 2),
  148. 0x00000000,
  149. (0x0e00 << 16) | (0xc2c0 >> 2),
  150. 0x00000000,
  151. (0x0e00 << 16) | (0x8228 >> 2),
  152. 0x00000000,
  153. (0x0e00 << 16) | (0x829c >> 2),
  154. 0x00000000,
  155. (0x0e00 << 16) | (0x869c >> 2),
  156. 0x00000000,
  157. (0x0600 << 16) | (0x98f4 >> 2),
  158. 0x00000000,
  159. (0x0e00 << 16) | (0x98f8 >> 2),
  160. 0x00000000,
  161. (0x0e00 << 16) | (0x9900 >> 2),
  162. 0x00000000,
  163. (0x0e00 << 16) | (0xc260 >> 2),
  164. 0x00000000,
  165. (0x0e00 << 16) | (0x90e8 >> 2),
  166. 0x00000000,
  167. (0x0e00 << 16) | (0x3c000 >> 2),
  168. 0x00000000,
  169. (0x0e00 << 16) | (0x3c00c >> 2),
  170. 0x00000000,
  171. (0x0e00 << 16) | (0x8c1c >> 2),
  172. 0x00000000,
  173. (0x0e00 << 16) | (0x9700 >> 2),
  174. 0x00000000,
  175. (0x0e00 << 16) | (0xcd20 >> 2),
  176. 0x00000000,
  177. (0x4e00 << 16) | (0xcd20 >> 2),
  178. 0x00000000,
  179. (0x5e00 << 16) | (0xcd20 >> 2),
  180. 0x00000000,
  181. (0x6e00 << 16) | (0xcd20 >> 2),
  182. 0x00000000,
  183. (0x7e00 << 16) | (0xcd20 >> 2),
  184. 0x00000000,
  185. (0x8e00 << 16) | (0xcd20 >> 2),
  186. 0x00000000,
  187. (0x9e00 << 16) | (0xcd20 >> 2),
  188. 0x00000000,
  189. (0xae00 << 16) | (0xcd20 >> 2),
  190. 0x00000000,
  191. (0xbe00 << 16) | (0xcd20 >> 2),
  192. 0x00000000,
  193. (0x0e00 << 16) | (0x89bc >> 2),
  194. 0x00000000,
  195. (0x0e00 << 16) | (0x8900 >> 2),
  196. 0x00000000,
  197. 0x3,
  198. (0x0e00 << 16) | (0xc130 >> 2),
  199. 0x00000000,
  200. (0x0e00 << 16) | (0xc134 >> 2),
  201. 0x00000000,
  202. (0x0e00 << 16) | (0xc1fc >> 2),
  203. 0x00000000,
  204. (0x0e00 << 16) | (0xc208 >> 2),
  205. 0x00000000,
  206. (0x0e00 << 16) | (0xc264 >> 2),
  207. 0x00000000,
  208. (0x0e00 << 16) | (0xc268 >> 2),
  209. 0x00000000,
  210. (0x0e00 << 16) | (0xc26c >> 2),
  211. 0x00000000,
  212. (0x0e00 << 16) | (0xc270 >> 2),
  213. 0x00000000,
  214. (0x0e00 << 16) | (0xc274 >> 2),
  215. 0x00000000,
  216. (0x0e00 << 16) | (0xc278 >> 2),
  217. 0x00000000,
  218. (0x0e00 << 16) | (0xc27c >> 2),
  219. 0x00000000,
  220. (0x0e00 << 16) | (0xc280 >> 2),
  221. 0x00000000,
  222. (0x0e00 << 16) | (0xc284 >> 2),
  223. 0x00000000,
  224. (0x0e00 << 16) | (0xc288 >> 2),
  225. 0x00000000,
  226. (0x0e00 << 16) | (0xc28c >> 2),
  227. 0x00000000,
  228. (0x0e00 << 16) | (0xc290 >> 2),
  229. 0x00000000,
  230. (0x0e00 << 16) | (0xc294 >> 2),
  231. 0x00000000,
  232. (0x0e00 << 16) | (0xc298 >> 2),
  233. 0x00000000,
  234. (0x0e00 << 16) | (0xc29c >> 2),
  235. 0x00000000,
  236. (0x0e00 << 16) | (0xc2a0 >> 2),
  237. 0x00000000,
  238. (0x0e00 << 16) | (0xc2a4 >> 2),
  239. 0x00000000,
  240. (0x0e00 << 16) | (0xc2a8 >> 2),
  241. 0x00000000,
  242. (0x0e00 << 16) | (0xc2ac >> 2),
  243. 0x00000000,
  244. (0x0e00 << 16) | (0xc2b0 >> 2),
  245. 0x00000000,
  246. (0x0e00 << 16) | (0x301d0 >> 2),
  247. 0x00000000,
  248. (0x0e00 << 16) | (0x30238 >> 2),
  249. 0x00000000,
  250. (0x0e00 << 16) | (0x30250 >> 2),
  251. 0x00000000,
  252. (0x0e00 << 16) | (0x30254 >> 2),
  253. 0x00000000,
  254. (0x0e00 << 16) | (0x30258 >> 2),
  255. 0x00000000,
  256. (0x0e00 << 16) | (0x3025c >> 2),
  257. 0x00000000,
  258. (0x4e00 << 16) | (0xc900 >> 2),
  259. 0x00000000,
  260. (0x5e00 << 16) | (0xc900 >> 2),
  261. 0x00000000,
  262. (0x6e00 << 16) | (0xc900 >> 2),
  263. 0x00000000,
  264. (0x7e00 << 16) | (0xc900 >> 2),
  265. 0x00000000,
  266. (0x8e00 << 16) | (0xc900 >> 2),
  267. 0x00000000,
  268. (0x9e00 << 16) | (0xc900 >> 2),
  269. 0x00000000,
  270. (0xae00 << 16) | (0xc900 >> 2),
  271. 0x00000000,
  272. (0xbe00 << 16) | (0xc900 >> 2),
  273. 0x00000000,
  274. (0x4e00 << 16) | (0xc904 >> 2),
  275. 0x00000000,
  276. (0x5e00 << 16) | (0xc904 >> 2),
  277. 0x00000000,
  278. (0x6e00 << 16) | (0xc904 >> 2),
  279. 0x00000000,
  280. (0x7e00 << 16) | (0xc904 >> 2),
  281. 0x00000000,
  282. (0x8e00 << 16) | (0xc904 >> 2),
  283. 0x00000000,
  284. (0x9e00 << 16) | (0xc904 >> 2),
  285. 0x00000000,
  286. (0xae00 << 16) | (0xc904 >> 2),
  287. 0x00000000,
  288. (0xbe00 << 16) | (0xc904 >> 2),
  289. 0x00000000,
  290. (0x4e00 << 16) | (0xc908 >> 2),
  291. 0x00000000,
  292. (0x5e00 << 16) | (0xc908 >> 2),
  293. 0x00000000,
  294. (0x6e00 << 16) | (0xc908 >> 2),
  295. 0x00000000,
  296. (0x7e00 << 16) | (0xc908 >> 2),
  297. 0x00000000,
  298. (0x8e00 << 16) | (0xc908 >> 2),
  299. 0x00000000,
  300. (0x9e00 << 16) | (0xc908 >> 2),
  301. 0x00000000,
  302. (0xae00 << 16) | (0xc908 >> 2),
  303. 0x00000000,
  304. (0xbe00 << 16) | (0xc908 >> 2),
  305. 0x00000000,
  306. (0x4e00 << 16) | (0xc90c >> 2),
  307. 0x00000000,
  308. (0x5e00 << 16) | (0xc90c >> 2),
  309. 0x00000000,
  310. (0x6e00 << 16) | (0xc90c >> 2),
  311. 0x00000000,
  312. (0x7e00 << 16) | (0xc90c >> 2),
  313. 0x00000000,
  314. (0x8e00 << 16) | (0xc90c >> 2),
  315. 0x00000000,
  316. (0x9e00 << 16) | (0xc90c >> 2),
  317. 0x00000000,
  318. (0xae00 << 16) | (0xc90c >> 2),
  319. 0x00000000,
  320. (0xbe00 << 16) | (0xc90c >> 2),
  321. 0x00000000,
  322. (0x4e00 << 16) | (0xc910 >> 2),
  323. 0x00000000,
  324. (0x5e00 << 16) | (0xc910 >> 2),
  325. 0x00000000,
  326. (0x6e00 << 16) | (0xc910 >> 2),
  327. 0x00000000,
  328. (0x7e00 << 16) | (0xc910 >> 2),
  329. 0x00000000,
  330. (0x8e00 << 16) | (0xc910 >> 2),
  331. 0x00000000,
  332. (0x9e00 << 16) | (0xc910 >> 2),
  333. 0x00000000,
  334. (0xae00 << 16) | (0xc910 >> 2),
  335. 0x00000000,
  336. (0xbe00 << 16) | (0xc910 >> 2),
  337. 0x00000000,
  338. (0x0e00 << 16) | (0xc99c >> 2),
  339. 0x00000000,
  340. (0x0e00 << 16) | (0x9834 >> 2),
  341. 0x00000000,
  342. (0x0000 << 16) | (0x30f00 >> 2),
  343. 0x00000000,
  344. (0x0001 << 16) | (0x30f00 >> 2),
  345. 0x00000000,
  346. (0x0000 << 16) | (0x30f04 >> 2),
  347. 0x00000000,
  348. (0x0001 << 16) | (0x30f04 >> 2),
  349. 0x00000000,
  350. (0x0000 << 16) | (0x30f08 >> 2),
  351. 0x00000000,
  352. (0x0001 << 16) | (0x30f08 >> 2),
  353. 0x00000000,
  354. (0x0000 << 16) | (0x30f0c >> 2),
  355. 0x00000000,
  356. (0x0001 << 16) | (0x30f0c >> 2),
  357. 0x00000000,
  358. (0x0600 << 16) | (0x9b7c >> 2),
  359. 0x00000000,
  360. (0x0e00 << 16) | (0x8a14 >> 2),
  361. 0x00000000,
  362. (0x0e00 << 16) | (0x8a18 >> 2),
  363. 0x00000000,
  364. (0x0600 << 16) | (0x30a00 >> 2),
  365. 0x00000000,
  366. (0x0e00 << 16) | (0x8bf0 >> 2),
  367. 0x00000000,
  368. (0x0e00 << 16) | (0x8bcc >> 2),
  369. 0x00000000,
  370. (0x0e00 << 16) | (0x8b24 >> 2),
  371. 0x00000000,
  372. (0x0e00 << 16) | (0x30a04 >> 2),
  373. 0x00000000,
  374. (0x0600 << 16) | (0x30a10 >> 2),
  375. 0x00000000,
  376. (0x0600 << 16) | (0x30a14 >> 2),
  377. 0x00000000,
  378. (0x0600 << 16) | (0x30a18 >> 2),
  379. 0x00000000,
  380. (0x0600 << 16) | (0x30a2c >> 2),
  381. 0x00000000,
  382. (0x0e00 << 16) | (0xc700 >> 2),
  383. 0x00000000,
  384. (0x0e00 << 16) | (0xc704 >> 2),
  385. 0x00000000,
  386. (0x0e00 << 16) | (0xc708 >> 2),
  387. 0x00000000,
  388. (0x0e00 << 16) | (0xc768 >> 2),
  389. 0x00000000,
  390. (0x0400 << 16) | (0xc770 >> 2),
  391. 0x00000000,
  392. (0x0400 << 16) | (0xc774 >> 2),
  393. 0x00000000,
  394. (0x0400 << 16) | (0xc778 >> 2),
  395. 0x00000000,
  396. (0x0400 << 16) | (0xc77c >> 2),
  397. 0x00000000,
  398. (0x0400 << 16) | (0xc780 >> 2),
  399. 0x00000000,
  400. (0x0400 << 16) | (0xc784 >> 2),
  401. 0x00000000,
  402. (0x0400 << 16) | (0xc788 >> 2),
  403. 0x00000000,
  404. (0x0400 << 16) | (0xc78c >> 2),
  405. 0x00000000,
  406. (0x0400 << 16) | (0xc798 >> 2),
  407. 0x00000000,
  408. (0x0400 << 16) | (0xc79c >> 2),
  409. 0x00000000,
  410. (0x0400 << 16) | (0xc7a0 >> 2),
  411. 0x00000000,
  412. (0x0400 << 16) | (0xc7a4 >> 2),
  413. 0x00000000,
  414. (0x0400 << 16) | (0xc7a8 >> 2),
  415. 0x00000000,
  416. (0x0400 << 16) | (0xc7ac >> 2),
  417. 0x00000000,
  418. (0x0400 << 16) | (0xc7b0 >> 2),
  419. 0x00000000,
  420. (0x0400 << 16) | (0xc7b4 >> 2),
  421. 0x00000000,
  422. (0x0e00 << 16) | (0x9100 >> 2),
  423. 0x00000000,
  424. (0x0e00 << 16) | (0x3c010 >> 2),
  425. 0x00000000,
  426. (0x0e00 << 16) | (0x92a8 >> 2),
  427. 0x00000000,
  428. (0x0e00 << 16) | (0x92ac >> 2),
  429. 0x00000000,
  430. (0x0e00 << 16) | (0x92b4 >> 2),
  431. 0x00000000,
  432. (0x0e00 << 16) | (0x92b8 >> 2),
  433. 0x00000000,
  434. (0x0e00 << 16) | (0x92bc >> 2),
  435. 0x00000000,
  436. (0x0e00 << 16) | (0x92c0 >> 2),
  437. 0x00000000,
  438. (0x0e00 << 16) | (0x92c4 >> 2),
  439. 0x00000000,
  440. (0x0e00 << 16) | (0x92c8 >> 2),
  441. 0x00000000,
  442. (0x0e00 << 16) | (0x92cc >> 2),
  443. 0x00000000,
  444. (0x0e00 << 16) | (0x92d0 >> 2),
  445. 0x00000000,
  446. (0x0e00 << 16) | (0x8c00 >> 2),
  447. 0x00000000,
  448. (0x0e00 << 16) | (0x8c04 >> 2),
  449. 0x00000000,
  450. (0x0e00 << 16) | (0x8c20 >> 2),
  451. 0x00000000,
  452. (0x0e00 << 16) | (0x8c38 >> 2),
  453. 0x00000000,
  454. (0x0e00 << 16) | (0x8c3c >> 2),
  455. 0x00000000,
  456. (0x0e00 << 16) | (0xae00 >> 2),
  457. 0x00000000,
  458. (0x0e00 << 16) | (0x9604 >> 2),
  459. 0x00000000,
  460. (0x0e00 << 16) | (0xac08 >> 2),
  461. 0x00000000,
  462. (0x0e00 << 16) | (0xac0c >> 2),
  463. 0x00000000,
  464. (0x0e00 << 16) | (0xac10 >> 2),
  465. 0x00000000,
  466. (0x0e00 << 16) | (0xac14 >> 2),
  467. 0x00000000,
  468. (0x0e00 << 16) | (0xac58 >> 2),
  469. 0x00000000,
  470. (0x0e00 << 16) | (0xac68 >> 2),
  471. 0x00000000,
  472. (0x0e00 << 16) | (0xac6c >> 2),
  473. 0x00000000,
  474. (0x0e00 << 16) | (0xac70 >> 2),
  475. 0x00000000,
  476. (0x0e00 << 16) | (0xac74 >> 2),
  477. 0x00000000,
  478. (0x0e00 << 16) | (0xac78 >> 2),
  479. 0x00000000,
  480. (0x0e00 << 16) | (0xac7c >> 2),
  481. 0x00000000,
  482. (0x0e00 << 16) | (0xac80 >> 2),
  483. 0x00000000,
  484. (0x0e00 << 16) | (0xac84 >> 2),
  485. 0x00000000,
  486. (0x0e00 << 16) | (0xac88 >> 2),
  487. 0x00000000,
  488. (0x0e00 << 16) | (0xac8c >> 2),
  489. 0x00000000,
  490. (0x0e00 << 16) | (0x970c >> 2),
  491. 0x00000000,
  492. (0x0e00 << 16) | (0x9714 >> 2),
  493. 0x00000000,
  494. (0x0e00 << 16) | (0x9718 >> 2),
  495. 0x00000000,
  496. (0x0e00 << 16) | (0x971c >> 2),
  497. 0x00000000,
  498. (0x0e00 << 16) | (0x31068 >> 2),
  499. 0x00000000,
  500. (0x4e00 << 16) | (0x31068 >> 2),
  501. 0x00000000,
  502. (0x5e00 << 16) | (0x31068 >> 2),
  503. 0x00000000,
  504. (0x6e00 << 16) | (0x31068 >> 2),
  505. 0x00000000,
  506. (0x7e00 << 16) | (0x31068 >> 2),
  507. 0x00000000,
  508. (0x8e00 << 16) | (0x31068 >> 2),
  509. 0x00000000,
  510. (0x9e00 << 16) | (0x31068 >> 2),
  511. 0x00000000,
  512. (0xae00 << 16) | (0x31068 >> 2),
  513. 0x00000000,
  514. (0xbe00 << 16) | (0x31068 >> 2),
  515. 0x00000000,
  516. (0x0e00 << 16) | (0xcd10 >> 2),
  517. 0x00000000,
  518. (0x0e00 << 16) | (0xcd14 >> 2),
  519. 0x00000000,
  520. (0x0e00 << 16) | (0x88b0 >> 2),
  521. 0x00000000,
  522. (0x0e00 << 16) | (0x88b4 >> 2),
  523. 0x00000000,
  524. (0x0e00 << 16) | (0x88b8 >> 2),
  525. 0x00000000,
  526. (0x0e00 << 16) | (0x88bc >> 2),
  527. 0x00000000,
  528. (0x0400 << 16) | (0x89c0 >> 2),
  529. 0x00000000,
  530. (0x0e00 << 16) | (0x88c4 >> 2),
  531. 0x00000000,
  532. (0x0e00 << 16) | (0x88c8 >> 2),
  533. 0x00000000,
  534. (0x0e00 << 16) | (0x88d0 >> 2),
  535. 0x00000000,
  536. (0x0e00 << 16) | (0x88d4 >> 2),
  537. 0x00000000,
  538. (0x0e00 << 16) | (0x88d8 >> 2),
  539. 0x00000000,
  540. (0x0e00 << 16) | (0x8980 >> 2),
  541. 0x00000000,
  542. (0x0e00 << 16) | (0x30938 >> 2),
  543. 0x00000000,
  544. (0x0e00 << 16) | (0x3093c >> 2),
  545. 0x00000000,
  546. (0x0e00 << 16) | (0x30940 >> 2),
  547. 0x00000000,
  548. (0x0e00 << 16) | (0x89a0 >> 2),
  549. 0x00000000,
  550. (0x0e00 << 16) | (0x30900 >> 2),
  551. 0x00000000,
  552. (0x0e00 << 16) | (0x30904 >> 2),
  553. 0x00000000,
  554. (0x0e00 << 16) | (0x89b4 >> 2),
  555. 0x00000000,
  556. (0x0e00 << 16) | (0x3c210 >> 2),
  557. 0x00000000,
  558. (0x0e00 << 16) | (0x3c214 >> 2),
  559. 0x00000000,
  560. (0x0e00 << 16) | (0x3c218 >> 2),
  561. 0x00000000,
  562. (0x0e00 << 16) | (0x8904 >> 2),
  563. 0x00000000,
  564. 0x5,
  565. (0x0e00 << 16) | (0x8c28 >> 2),
  566. (0x0e00 << 16) | (0x8c2c >> 2),
  567. (0x0e00 << 16) | (0x8c30 >> 2),
  568. (0x0e00 << 16) | (0x8c34 >> 2),
  569. (0x0e00 << 16) | (0x9600 >> 2),
  570. };
  571. static const u32 kalindi_rlc_save_restore_register_list[] =
  572. {
  573. (0x0e00 << 16) | (0xc12c >> 2),
  574. 0x00000000,
  575. (0x0e00 << 16) | (0xc140 >> 2),
  576. 0x00000000,
  577. (0x0e00 << 16) | (0xc150 >> 2),
  578. 0x00000000,
  579. (0x0e00 << 16) | (0xc15c >> 2),
  580. 0x00000000,
  581. (0x0e00 << 16) | (0xc168 >> 2),
  582. 0x00000000,
  583. (0x0e00 << 16) | (0xc170 >> 2),
  584. 0x00000000,
  585. (0x0e00 << 16) | (0xc204 >> 2),
  586. 0x00000000,
  587. (0x0e00 << 16) | (0xc2b4 >> 2),
  588. 0x00000000,
  589. (0x0e00 << 16) | (0xc2b8 >> 2),
  590. 0x00000000,
  591. (0x0e00 << 16) | (0xc2bc >> 2),
  592. 0x00000000,
  593. (0x0e00 << 16) | (0xc2c0 >> 2),
  594. 0x00000000,
  595. (0x0e00 << 16) | (0x8228 >> 2),
  596. 0x00000000,
  597. (0x0e00 << 16) | (0x829c >> 2),
  598. 0x00000000,
  599. (0x0e00 << 16) | (0x869c >> 2),
  600. 0x00000000,
  601. (0x0600 << 16) | (0x98f4 >> 2),
  602. 0x00000000,
  603. (0x0e00 << 16) | (0x98f8 >> 2),
  604. 0x00000000,
  605. (0x0e00 << 16) | (0x9900 >> 2),
  606. 0x00000000,
  607. (0x0e00 << 16) | (0xc260 >> 2),
  608. 0x00000000,
  609. (0x0e00 << 16) | (0x90e8 >> 2),
  610. 0x00000000,
  611. (0x0e00 << 16) | (0x3c000 >> 2),
  612. 0x00000000,
  613. (0x0e00 << 16) | (0x3c00c >> 2),
  614. 0x00000000,
  615. (0x0e00 << 16) | (0x8c1c >> 2),
  616. 0x00000000,
  617. (0x0e00 << 16) | (0x9700 >> 2),
  618. 0x00000000,
  619. (0x0e00 << 16) | (0xcd20 >> 2),
  620. 0x00000000,
  621. (0x4e00 << 16) | (0xcd20 >> 2),
  622. 0x00000000,
  623. (0x5e00 << 16) | (0xcd20 >> 2),
  624. 0x00000000,
  625. (0x6e00 << 16) | (0xcd20 >> 2),
  626. 0x00000000,
  627. (0x7e00 << 16) | (0xcd20 >> 2),
  628. 0x00000000,
  629. (0x0e00 << 16) | (0x89bc >> 2),
  630. 0x00000000,
  631. (0x0e00 << 16) | (0x8900 >> 2),
  632. 0x00000000,
  633. 0x3,
  634. (0x0e00 << 16) | (0xc130 >> 2),
  635. 0x00000000,
  636. (0x0e00 << 16) | (0xc134 >> 2),
  637. 0x00000000,
  638. (0x0e00 << 16) | (0xc1fc >> 2),
  639. 0x00000000,
  640. (0x0e00 << 16) | (0xc208 >> 2),
  641. 0x00000000,
  642. (0x0e00 << 16) | (0xc264 >> 2),
  643. 0x00000000,
  644. (0x0e00 << 16) | (0xc268 >> 2),
  645. 0x00000000,
  646. (0x0e00 << 16) | (0xc26c >> 2),
  647. 0x00000000,
  648. (0x0e00 << 16) | (0xc270 >> 2),
  649. 0x00000000,
  650. (0x0e00 << 16) | (0xc274 >> 2),
  651. 0x00000000,
  652. (0x0e00 << 16) | (0xc28c >> 2),
  653. 0x00000000,
  654. (0x0e00 << 16) | (0xc290 >> 2),
  655. 0x00000000,
  656. (0x0e00 << 16) | (0xc294 >> 2),
  657. 0x00000000,
  658. (0x0e00 << 16) | (0xc298 >> 2),
  659. 0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 bonaire_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 bonaire_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x3350, 0x000c0fc0, 0x00040200,
	0x9a10, 0x00010000, 0x00058208,
	0x3c000, 0xffff1fff, 0x00140000,
	0x3c200, 0xfdfc0fff, 0x00000100,
	0x3c234, 0x40000000, 0x40000200,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x220c, 0x00007fb6, 0x0021a1b1,
	0x2210, 0x00007fb6, 0x002021b1,
	0x2180, 0x00007fb6, 0x00002191,
	0x2218, 0x00007fb6, 0x002121b1,
	0x221c, 0x00007fb6, 0x002021b1,
	0x21dc, 0x00007fb6, 0x00002191,
	0x21e0, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000003f, 0x00000007,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0x9100, 0x03000000, 0x0362c688,
	0x8c00, 0x000000ff, 0x00000001,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac0c, 0xffffffff, 0x00001032
};

static const u32 bonaire_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0xc0000100,
	0x3c2c8, 0xffffffff, 0xc0000100,
	0x3c2c4, 0xffffffff, 0xc0000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 spectre_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 spectre_golden_registers[] =
{
	0x3c000, 0xffff1fff, 0x96940200,
	0x3c00c, 0xffff0001, 0xff000000,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffc, 0x00020200,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x2f48, 0x73773777, 0x12010001,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x28355, 0x0000003f, 0x00000000,
	0x3e78, 0x00000001, 0x00000002,
	0x913c, 0xffff03df, 0x00000004,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000008ff, 0x00000800,
	0x9508, 0x00010000, 0x00010000,
	0xac0c, 0xffffffff, 0x54763210,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x30934, 0xffffffff, 0x00000001
};

static const u32 spectre_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 kalindi_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 kalindi_golden_registers[] =
{
	0x3c000, 0xffffdfff, 0x6e944040,
	0x55e4, 0xff607fff, 0xfc000100,
	0x3c220, 0xff000fff, 0x00000100,
	0x3c224, 0xff000fff, 0x00000100,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ffcfff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000000ff, 0x00000003,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};

static const u32 kalindi_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
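
/*
 * The golden-register tables above are flat arrays of {offset, and_mask,
 * or_mask} triplets, consumed three entries at a time by
 * radeon_program_register_sequence() in cik_init_golden_registers() below;
 * a sketch of that triplet application follows the function.
 */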
static void cik_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	default:
		break;
	}
}
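
/*
 * Illustrative sketch only (not compiled): the helper called above,
 * radeon_program_register_sequence(), lives elsewhere in the driver.  The
 * loop below restates, as an assumption for documentation purposes, how
 * such a {offset, and_mask, or_mask} triplet table is typically applied;
 * it is not the authoritative implementation.
 */
#if 0
static void example_program_register_sequence(struct radeon_device *rdev,
					      const u32 *registers,
					      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	/* tables are triplets: {register offset, bits to clear, bits to set} */
	for (i = 0; i + 2 < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			/* a full mask degenerates to a plain register write */
			tmp = or_mask;
		} else {
			/* otherwise read-modify-write only the masked bits */
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
#endif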

/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}
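
/*
 * Usage sketch (illustrative, not compiled): the driver appears to keep
 * clock frequencies such as reference_freq in units of 10 kHz, so a caller
 * needing the xclk in Hz would scale accordingly.  Both the unit assumption
 * and the helper name below are hypothetical.
 */
#if 0
static u64 example_xclk_to_hz(struct radeon_device *rdev)
{
	/* assumes cik_get_xclk() returns the reference clock in 10 kHz units */
	return (u64)cik_get_xclk(rdev) * 10000;
}
#endif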

/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 *
 * Returns the value in the doorbell aperture at the
 * requested offset (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
{
	if (offset < rdev->doorbell.size) {
		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
		return 0;
	}
}

/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested offset (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
{
	if (offset < rdev->doorbell.size) {
		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
	}
}
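
/*
 * Usage sketch (illustrative, not compiled): a compute ring typically
 * commits its write pointer through the doorbell aperture rather than a
 * plain MMIO register.  The ring field name below is a hypothetical
 * stand-in for the bookkeeping used elsewhere in the driver.
 */
#if 0
static void example_ring_commit(struct radeon_device *rdev,
				struct radeon_ring *ring)
{
	/* hypothetical: assumes the ring records its doorbell byte offset */
	cik_mm_wdoorbell(rdev, ring->doorbell_offset, ring->wptr);
}
#endif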

#define BONAIRE_IO_MC_REGS_SIZE 36

static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};

/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}
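
/*
 * Usage sketch (illustrative, not compiled): callers bracket accesses to
 * instanced registers with a select/restore pair, serialized by a mutex,
 * and return the SRBM to instance 0 afterwards.  The lock name follows the
 * pattern used elsewhere in the driver but is shown here as an assumption,
 * not a verbatim excerpt.
 */
#if 0
static void example_touch_instanced_regs(struct radeon_device *rdev,
					 u32 me, u32 pipe, u32 queue)
{
	mutex_lock(&rdev->srbm_mutex);
	cik_srbm_select(rdev, me, pipe, queue, 0);

	/* ... read or write per-me/pipe/queue registers here ... */

	cik_srbm_select(rdev, 0, 0, 0, 0);
	mutex_unlock(&rdev->srbm_mutex);
}
#endif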

/* ucode loading */
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
	default:
		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
		ucode_size = CIK_MC_UCODE_SIZE;
		regs_size = BONAIRE_IO_MC_REGS_SIZE;
		break;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
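		/*
		 * Note: the blackout save/restore below is effectively dead
		 * code; it sits inside the (running == 0) branch, so the
		 * (running) tests here and at the end can never be true.
		 */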
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}

		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
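
/*
 * Note: the MC ucode image is stored as big-endian dwords (hence the
 * __be32 cast above); be32_to_cpup() converts each word to CPU byte order
 * before it is written to MC_SEQ_SUP_PGM.
 */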

/**
 * cik_init_microcode - load ucode images from disk
 *
 * @rdev: radeon_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size,
		mec_req_size, rlc_req_size, mc_req_size,
		sdma_req_size, smc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BONAIRE:
		chip_name = "BONAIRE";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = CIK_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_KAVERI:
		chip_name = "KAVERI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KV_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	case CHIP_KABINI:
		chip_name = "KABINI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->ce_fw->size != ce_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->ce_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->mec_fw->size != mec_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mec_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
	err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->sdma_fw->size != sdma_req_size) {
		printk(KERN_ERR
		       "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
		       rdev->sdma_fw->size, fw_name);
		err = -EINVAL;
	}

	/* No SMC, MC ucode on APUs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "cik_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
		} else if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "cik_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "cik_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->ce_fw);
		rdev->ce_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
/**
 * cik_tiling_mode_table_init - init the hw tiling table
 *
 * @rdev: radeon_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes. Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
	u32 num_pipe_configs;
	u32 num_rbs = rdev->config.cik.max_backends_per_se *
		rdev->config.cik.max_shader_engines;

	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	num_pipe_configs = rdev->config.cik.max_tile_pipes;
	if (num_pipe_configs > 8)
		num_pipe_configs = 8; /* ??? */

	if (num_pipe_configs == 8) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if (num_pipe_configs == 4) {
		if (num_rbs == 4) {
			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
				switch (reg_offset) {
				case 0:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
					break;
				case 1:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
					break;
				case 2:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 3:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
					break;
				case 4:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 5:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
					break;
				case 6:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 7:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 8:
					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16));
					break;
				case 9:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
					break;
				case 10:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 11:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 12:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 13:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
					break;
				case 14:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 16:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 17:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 27:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
					break;
				case 28:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 29:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 30:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				default:
					gb_tile_moden = 0;
					break;
				}
				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
			}
		} else if (num_rbs < 4) {
			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
				switch (reg_offset) {
				case 0:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
					break;
				case 1:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
					break;
				case 2:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 3:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
					break;
				case 4:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 5:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
					break;
				case 6:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 7:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 8:
					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16));
					break;
				case 9:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
					break;
				case 10:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 11:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 12:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 13:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
					break;
				case 14:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 16:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 17:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 27:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
					break;
				case 28:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 29:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 30:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				default:
					gb_tile_moden = 0;
					break;
				}
				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
			}
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if (num_pipe_configs == 2) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2392. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2393. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2394. NUM_BANKS(ADDR_SURF_16_BANK));
  2395. break;
  2396. case 2:
  2397. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2398. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2399. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2400. NUM_BANKS(ADDR_SURF_16_BANK));
  2401. break;
  2402. case 3:
  2403. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2404. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2405. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2406. NUM_BANKS(ADDR_SURF_16_BANK));
  2407. break;
  2408. case 4:
  2409. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2410. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2411. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2412. NUM_BANKS(ADDR_SURF_16_BANK));
  2413. break;
  2414. case 5:
  2415. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2416. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2417. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2418. NUM_BANKS(ADDR_SURF_16_BANK));
  2419. break;
  2420. case 6:
  2421. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2422. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2423. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2424. NUM_BANKS(ADDR_SURF_8_BANK));
  2425. break;
  2426. case 8:
  2427. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2428. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2429. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2430. NUM_BANKS(ADDR_SURF_16_BANK));
  2431. break;
  2432. case 9:
  2433. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2434. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2435. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2436. NUM_BANKS(ADDR_SURF_16_BANK));
  2437. break;
  2438. case 10:
  2439. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2440. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2441. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2442. NUM_BANKS(ADDR_SURF_16_BANK));
  2443. break;
  2444. case 11:
  2445. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2446. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2447. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2448. NUM_BANKS(ADDR_SURF_16_BANK));
  2449. break;
  2450. case 12:
  2451. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2452. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2453. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2454. NUM_BANKS(ADDR_SURF_16_BANK));
  2455. break;
  2456. case 13:
  2457. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2458. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2459. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2460. NUM_BANKS(ADDR_SURF_16_BANK));
  2461. break;
  2462. case 14:
  2463. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2464. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2465. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2466. NUM_BANKS(ADDR_SURF_8_BANK));
  2467. break;
  2468. default:
  2469. gb_tile_moden = 0;
  2470. break;
  2471. }
  2472. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2473. }
  2474. } else
  2475. DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
  2476. }
  2477. /**
  2478. * cik_select_se_sh - select which SE, SH to address
  2479. *
  2480. * @rdev: radeon_device pointer
  2481. * @se_num: shader engine to address
  2482. * @sh_num: sh block to address
  2483. *
  2484. * Select which SE, SH combinations to address. Certain
  2485. * registers are instanced per SE or SH. 0xffffffff means
  2486. * broadcast to all SEs or SHs (CIK).
  2487. */
  2488. static void cik_select_se_sh(struct radeon_device *rdev,
  2489. u32 se_num, u32 sh_num)
  2490. {
  2491. u32 data = INSTANCE_BROADCAST_WRITES;
  2492. if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
  2493. data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
  2494. else if (se_num == 0xffffffff)
  2495. data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
  2496. else if (sh_num == 0xffffffff)
  2497. data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
  2498. else
  2499. data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
  2500. WREG32(GRBM_GFX_INDEX, data);
  2501. }
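/*
 * e.g. cik_select_se_sh(rdev, 1, 0xffffffff) programs GRBM_GFX_INDEX with
 * SE_INDEX(1) | SH_BROADCAST_WRITES | INSTANCE_BROADCAST_WRITES, so the
 * register accesses that follow hit every SH of shader engine 1.
 */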
  2502. /**
  2503. * cik_create_bitmask - create a bitmask
  2504. *
  2505. * @bit_width: length of the mask
  2506. *
  2507. * create a variable length bit mask (CIK).
  2508. * Returns the bitmask.
  2509. */
  2510. static u32 cik_create_bitmask(u32 bit_width)
  2511. {
  2512. u32 i, mask = 0;
  2513. for (i = 0; i < bit_width; i++) {
  2514. mask <<= 1;
  2515. mask |= 1;
  2516. }
  2517. return mask;
  2518. }
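/*
 * e.g. cik_create_bitmask(4) returns 0xf; for bit_width < 32 this is
 * equivalent to ((1u << bit_width) - 1).
 */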
  2519. /**
2520. * cik_get_rb_disabled - compute the disabled render backend (RB) bitmask
  2521. *
  2522. * @rdev: radeon_device pointer
  2523. * @max_rb_num: max RBs (render backends) for the asic
  2524. * @se_num: number of SEs (shader engines) for the asic
  2525. * @sh_per_se: number of SH blocks per SE for the asic
  2526. *
  2527. * Calculates the bitmask of disabled RBs (CIK).
  2528. * Returns the disabled RB bitmask.
  2529. */
  2530. static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  2531. u32 max_rb_num, u32 se_num,
  2532. u32 sh_per_se)
  2533. {
  2534. u32 data, mask;
  2535. data = RREG32(CC_RB_BACKEND_DISABLE);
  2536. if (data & 1)
  2537. data &= BACKEND_DISABLE_MASK;
  2538. else
  2539. data = 0;
  2540. data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
  2541. data >>= BACKEND_DISABLE_SHIFT;
  2542. mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
  2543. return data & mask;
  2544. }
  2545. /**
  2546. * cik_setup_rb - setup the RBs on the asic
  2547. *
  2548. * @rdev: radeon_device pointer
  2549. * @se_num: number of SEs (shader engines) for the asic
  2550. * @sh_per_se: number of SH blocks per SE for the asic
  2551. * @max_rb_num: max RBs (render backends) for the asic
  2552. *
  2553. * Configures per-SE/SH RB registers (CIK).
  2554. */
  2555. static void cik_setup_rb(struct radeon_device *rdev,
  2556. u32 se_num, u32 sh_per_se,
  2557. u32 max_rb_num)
  2558. {
  2559. int i, j;
  2560. u32 data, mask;
  2561. u32 disabled_rbs = 0;
  2562. u32 enabled_rbs = 0;
  2563. for (i = 0; i < se_num; i++) {
  2564. for (j = 0; j < sh_per_se; j++) {
  2565. cik_select_se_sh(rdev, i, j);
  2566. data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
  2567. disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
  2568. }
  2569. }
  2570. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  2571. mask = 1;
  2572. for (i = 0; i < max_rb_num; i++) {
  2573. if (!(disabled_rbs & mask))
  2574. enabled_rbs |= mask;
  2575. mask <<= 1;
  2576. }
  2577. for (i = 0; i < se_num; i++) {
  2578. cik_select_se_sh(rdev, i, 0xffffffff);
  2579. data = 0;
  2580. for (j = 0; j < sh_per_se; j++) {
  2581. switch (enabled_rbs & 3) {
  2582. case 1:
  2583. data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
  2584. break;
  2585. case 2:
  2586. data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
  2587. break;
  2588. case 3:
  2589. default:
  2590. data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
  2591. break;
  2592. }
  2593. enabled_rbs >>= 2;
  2594. }
  2595. WREG32(PA_SC_RASTER_CONFIG, data);
  2596. }
  2597. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  2598. }
  2599. /**
  2600. * cik_gpu_init - setup the 3D engine
  2601. *
  2602. * @rdev: radeon_device pointer
  2603. *
  2604. * Configures the 3D engine and tiling configuration
  2605. * registers so that the 3D engine is usable.
  2606. */
  2607. static void cik_gpu_init(struct radeon_device *rdev)
  2608. {
  2609. u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
  2610. u32 mc_shared_chmap, mc_arb_ramcfg;
  2611. u32 hdp_host_path_cntl;
  2612. u32 tmp;
  2613. int i, j;
  2614. switch (rdev->family) {
  2615. case CHIP_BONAIRE:
  2616. rdev->config.cik.max_shader_engines = 2;
  2617. rdev->config.cik.max_tile_pipes = 4;
  2618. rdev->config.cik.max_cu_per_sh = 7;
  2619. rdev->config.cik.max_sh_per_se = 1;
  2620. rdev->config.cik.max_backends_per_se = 2;
  2621. rdev->config.cik.max_texture_channel_caches = 4;
  2622. rdev->config.cik.max_gprs = 256;
  2623. rdev->config.cik.max_gs_threads = 32;
  2624. rdev->config.cik.max_hw_contexts = 8;
  2625. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  2626. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  2627. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  2628. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  2629. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2630. break;
  2631. case CHIP_KAVERI:
  2632. rdev->config.cik.max_shader_engines = 1;
  2633. rdev->config.cik.max_tile_pipes = 4;
  2634. if ((rdev->pdev->device == 0x1304) ||
  2635. (rdev->pdev->device == 0x1305) ||
  2636. (rdev->pdev->device == 0x130C) ||
  2637. (rdev->pdev->device == 0x130F) ||
  2638. (rdev->pdev->device == 0x1310) ||
  2639. (rdev->pdev->device == 0x1311) ||
  2640. (rdev->pdev->device == 0x131C)) {
  2641. rdev->config.cik.max_cu_per_sh = 8;
  2642. rdev->config.cik.max_backends_per_se = 2;
  2643. } else if ((rdev->pdev->device == 0x1309) ||
  2644. (rdev->pdev->device == 0x130A) ||
  2645. (rdev->pdev->device == 0x130D) ||
  2646. (rdev->pdev->device == 0x1313)) {
  2647. rdev->config.cik.max_cu_per_sh = 6;
  2648. rdev->config.cik.max_backends_per_se = 2;
  2649. } else if ((rdev->pdev->device == 0x1306) ||
  2650. (rdev->pdev->device == 0x1307) ||
  2651. (rdev->pdev->device == 0x130B) ||
  2652. (rdev->pdev->device == 0x130E) ||
  2653. (rdev->pdev->device == 0x1315) ||
  2654. (rdev->pdev->device == 0x131B)) {
  2655. rdev->config.cik.max_cu_per_sh = 4;
  2656. rdev->config.cik.max_backends_per_se = 1;
  2657. } else {
  2658. rdev->config.cik.max_cu_per_sh = 3;
  2659. rdev->config.cik.max_backends_per_se = 1;
  2660. }
  2661. rdev->config.cik.max_sh_per_se = 1;
  2662. rdev->config.cik.max_texture_channel_caches = 4;
  2663. rdev->config.cik.max_gprs = 256;
  2664. rdev->config.cik.max_gs_threads = 16;
  2665. rdev->config.cik.max_hw_contexts = 8;
  2666. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  2667. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  2668. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  2669. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  2670. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2671. break;
  2672. case CHIP_KABINI:
  2673. default:
  2674. rdev->config.cik.max_shader_engines = 1;
  2675. rdev->config.cik.max_tile_pipes = 2;
  2676. rdev->config.cik.max_cu_per_sh = 2;
  2677. rdev->config.cik.max_sh_per_se = 1;
  2678. rdev->config.cik.max_backends_per_se = 1;
  2679. rdev->config.cik.max_texture_channel_caches = 2;
  2680. rdev->config.cik.max_gprs = 256;
  2681. rdev->config.cik.max_gs_threads = 16;
  2682. rdev->config.cik.max_hw_contexts = 8;
  2683. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  2684. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  2685. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  2686. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  2687. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2688. break;
  2689. }
  2690. /* Initialize HDP */
  2691. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  2692. WREG32((0x2c14 + j), 0x00000000);
  2693. WREG32((0x2c18 + j), 0x00000000);
  2694. WREG32((0x2c1c + j), 0x00000000);
  2695. WREG32((0x2c20 + j), 0x00000000);
  2696. WREG32((0x2c24 + j), 0x00000000);
  2697. }
  2698. WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
  2699. WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
  2700. mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
  2701. mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
  2702. rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
  2703. rdev->config.cik.mem_max_burst_length_bytes = 256;
  2704. tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
  2705. rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
  2706. if (rdev->config.cik.mem_row_size_in_kb > 4)
  2707. rdev->config.cik.mem_row_size_in_kb = 4;
  2708. /* XXX use MC settings? */
  2709. rdev->config.cik.shader_engine_tile_size = 32;
  2710. rdev->config.cik.num_gpus = 1;
  2711. rdev->config.cik.multi_gpu_tile_size = 64;
  2712. /* fix up row size */
  2713. gb_addr_config &= ~ROW_SIZE_MASK;
  2714. switch (rdev->config.cik.mem_row_size_in_kb) {
  2715. case 1:
  2716. default:
  2717. gb_addr_config |= ROW_SIZE(0);
  2718. break;
  2719. case 2:
  2720. gb_addr_config |= ROW_SIZE(1);
  2721. break;
  2722. case 4:
  2723. gb_addr_config |= ROW_SIZE(2);
  2724. break;
  2725. }
  2726. /* setup tiling info dword. gb_addr_config is not adequate since it does
  2727. * not have bank info, so create a custom tiling dword.
  2728. * bits 3:0 num_pipes
  2729. * bits 7:4 num_banks
  2730. * bits 11:8 group_size
  2731. * bits 15:12 row_size
  2732. */
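/*
 * For example, num_tile_pipes == 4 selects field value 2 in the switch
 * below, so bits 3:0 of tile_config read back as 0x2 and the pipe count
 * can be recovered as (1 << (tile_config & 0xf)).
 */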
  2733. rdev->config.cik.tile_config = 0;
  2734. switch (rdev->config.cik.num_tile_pipes) {
  2735. case 1:
  2736. rdev->config.cik.tile_config |= (0 << 0);
  2737. break;
  2738. case 2:
  2739. rdev->config.cik.tile_config |= (1 << 0);
  2740. break;
  2741. case 4:
  2742. rdev->config.cik.tile_config |= (2 << 0);
  2743. break;
  2744. case 8:
  2745. default:
  2746. /* XXX what about 12? */
  2747. rdev->config.cik.tile_config |= (3 << 0);
  2748. break;
  2749. }
  2750. if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
  2751. rdev->config.cik.tile_config |= 1 << 4;
  2752. else
  2753. rdev->config.cik.tile_config |= 0 << 4;
  2754. rdev->config.cik.tile_config |=
  2755. ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
  2756. rdev->config.cik.tile_config |=
  2757. ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
  2758. WREG32(GB_ADDR_CONFIG, gb_addr_config);
  2759. WREG32(HDP_ADDR_CONFIG, gb_addr_config);
  2760. WREG32(DMIF_ADDR_CALC, gb_addr_config);
  2761. WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
  2762. WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
  2763. WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
  2764. WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
  2765. WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
  2766. cik_tiling_mode_table_init(rdev);
  2767. cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
  2768. rdev->config.cik.max_sh_per_se,
  2769. rdev->config.cik.max_backends_per_se);
  2770. /* set HW defaults for 3D engine */
  2771. WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
  2772. WREG32(SX_DEBUG_1, 0x20);
  2773. WREG32(TA_CNTL_AUX, 0x00010000);
  2774. tmp = RREG32(SPI_CONFIG_CNTL);
  2775. tmp |= 0x03000000;
  2776. WREG32(SPI_CONFIG_CNTL, tmp);
  2777. WREG32(SQ_CONFIG, 1);
  2778. WREG32(DB_DEBUG, 0);
  2779. tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
  2780. tmp |= 0x00000400;
  2781. WREG32(DB_DEBUG2, tmp);
  2782. tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
  2783. tmp |= 0x00020200;
  2784. WREG32(DB_DEBUG3, tmp);
  2785. tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
  2786. tmp |= 0x00018208;
  2787. WREG32(CB_HW_CONTROL, tmp);
  2788. WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
  2789. WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
  2790. SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
  2791. SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
  2792. SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
  2793. WREG32(VGT_NUM_INSTANCES, 1);
  2794. WREG32(CP_PERFMON_CNTL, 0);
  2795. WREG32(SQ_CONFIG, 0);
  2796. WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
  2797. FORCE_EOV_MAX_REZ_CNT(255)));
  2798. WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
  2799. AUTO_INVLD_EN(ES_AND_GS_AUTO));
  2800. WREG32(VGT_GS_VERTEX_REUSE, 16);
  2801. WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
  2802. tmp = RREG32(HDP_MISC_CNTL);
  2803. tmp |= HDP_FLUSH_INVALIDATE_CACHE;
  2804. WREG32(HDP_MISC_CNTL, tmp);
  2805. hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
  2806. WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
  2807. WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
  2808. WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
  2809. udelay(50);
  2810. }
  2811. /*
  2812. * GPU scratch registers helpers function.
  2813. */
  2814. /**
  2815. * cik_scratch_init - setup driver info for CP scratch regs
  2816. *
  2817. * @rdev: radeon_device pointer
  2818. *
  2819. * Set up the number and offset of the CP scratch registers.
2820. * NOTE: use of CP scratch registers is a legacy interface and
  2821. * is not used by default on newer asics (r6xx+). On newer asics,
  2822. * memory buffers are used for fences rather than scratch regs.
  2823. */
  2824. static void cik_scratch_init(struct radeon_device *rdev)
  2825. {
  2826. int i;
  2827. rdev->scratch.num_reg = 7;
  2828. rdev->scratch.reg_base = SCRATCH_REG0;
  2829. for (i = 0; i < rdev->scratch.num_reg; i++) {
  2830. rdev->scratch.free[i] = true;
  2831. rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
  2832. }
  2833. }
  2834. /**
  2835. * cik_ring_test - basic gfx ring test
  2836. *
  2837. * @rdev: radeon_device pointer
  2838. * @ring: radeon_ring structure holding ring information
  2839. *
  2840. * Allocate a scratch register and write to it using the gfx ring (CIK).
  2841. * Provides a basic gfx ring test to verify that the ring is working.
  2842. * Used by cik_cp_gfx_resume();
  2843. * Returns 0 on success, error on failure.
  2844. */
  2845. int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  2846. {
  2847. uint32_t scratch;
  2848. uint32_t tmp = 0;
  2849. unsigned i;
  2850. int r;
  2851. r = radeon_scratch_get(rdev, &scratch);
  2852. if (r) {
  2853. DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
  2854. return r;
  2855. }
  2856. WREG32(scratch, 0xCAFEDEAD);
  2857. r = radeon_ring_lock(rdev, ring, 3);
  2858. if (r) {
  2859. DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
  2860. radeon_scratch_free(rdev, scratch);
  2861. return r;
  2862. }
  2863. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  2864. radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
  2865. radeon_ring_write(ring, 0xDEADBEEF);
  2866. radeon_ring_unlock_commit(rdev, ring);
  2867. for (i = 0; i < rdev->usec_timeout; i++) {
  2868. tmp = RREG32(scratch);
  2869. if (tmp == 0xDEADBEEF)
  2870. break;
  2871. DRM_UDELAY(1);
  2872. }
  2873. if (i < rdev->usec_timeout) {
  2874. DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
  2875. } else {
  2876. DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
  2877. ring->idx, scratch, tmp);
  2878. r = -EINVAL;
  2879. }
  2880. radeon_scratch_free(rdev, scratch);
  2881. return r;
  2882. }
  2883. /**
  2884. * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  2885. *
  2886. * @rdev: radeon_device pointer
  2887. * @fence: radeon fence object
  2888. *
2889. * Emits a fence sequence number on the gfx ring and flushes
  2890. * GPU caches.
  2891. */
  2892. void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
  2893. struct radeon_fence *fence)
  2894. {
  2895. struct radeon_ring *ring = &rdev->ring[fence->ring];
  2896. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  2897. /* EVENT_WRITE_EOP - flush caches, send int */
  2898. radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  2899. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  2900. EOP_TC_ACTION_EN |
  2901. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  2902. EVENT_INDEX(5)));
  2903. radeon_ring_write(ring, addr & 0xfffffffc);
  2904. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
  2905. radeon_ring_write(ring, fence->seq);
  2906. radeon_ring_write(ring, 0);
  2907. /* HDP flush */
  2908. /* We should be using the new WAIT_REG_MEM special op packet here
  2909. * but it causes the CP to hang
  2910. */
  2911. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  2912. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  2913. WRITE_DATA_DST_SEL(0)));
  2914. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  2915. radeon_ring_write(ring, 0);
  2916. radeon_ring_write(ring, 0);
  2917. }
  2918. /**
  2919. * cik_fence_compute_ring_emit - emit a fence on the compute ring
  2920. *
  2921. * @rdev: radeon_device pointer
  2922. * @fence: radeon fence object
  2923. *
2924. * Emits a fence sequence number on the compute ring and flushes
  2925. * GPU caches.
  2926. */
  2927. void cik_fence_compute_ring_emit(struct radeon_device *rdev,
  2928. struct radeon_fence *fence)
  2929. {
  2930. struct radeon_ring *ring = &rdev->ring[fence->ring];
  2931. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  2932. /* RELEASE_MEM - flush caches, send int */
  2933. radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
  2934. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  2935. EOP_TC_ACTION_EN |
  2936. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  2937. EVENT_INDEX(5)));
  2938. radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
  2939. radeon_ring_write(ring, addr & 0xfffffffc);
  2940. radeon_ring_write(ring, upper_32_bits(addr));
  2941. radeon_ring_write(ring, fence->seq);
  2942. radeon_ring_write(ring, 0);
  2943. /* HDP flush */
  2944. /* We should be using the new WAIT_REG_MEM special op packet here
  2945. * but it causes the CP to hang
  2946. */
  2947. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  2948. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  2949. WRITE_DATA_DST_SEL(0)));
  2950. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  2951. radeon_ring_write(ring, 0);
  2952. radeon_ring_write(ring, 0);
  2953. }
  2954. void cik_semaphore_ring_emit(struct radeon_device *rdev,
  2955. struct radeon_ring *ring,
  2956. struct radeon_semaphore *semaphore,
  2957. bool emit_wait)
  2958. {
  2959. uint64_t addr = semaphore->gpu_addr;
  2960. unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
  2961. radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
  2962. radeon_ring_write(ring, addr & 0xffffffff);
  2963. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
  2964. }
  2965. /*
  2966. * IB stuff
  2967. */
  2968. /**
  2969. * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
  2970. *
  2971. * @rdev: radeon_device pointer
  2972. * @ib: radeon indirect buffer object
  2973. *
2974. * Emits a DE (drawing engine) or CE (constant engine) IB
  2975. * on the gfx ring. IBs are usually generated by userspace
  2976. * acceleration drivers and submitted to the kernel for
2977. * scheduling on the ring. This function schedules the IB
  2978. * on the gfx ring for execution by the GPU.
  2979. */
  2980. void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  2981. {
  2982. struct radeon_ring *ring = &rdev->ring[ib->ring];
  2983. u32 header, control = INDIRECT_BUFFER_VALID;
  2984. if (ib->is_const_ib) {
  2985. /* set switch buffer packet before const IB */
  2986. radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  2987. radeon_ring_write(ring, 0);
  2988. header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
  2989. } else {
  2990. u32 next_rptr;
  2991. if (ring->rptr_save_reg) {
  2992. next_rptr = ring->wptr + 3 + 4;
  2993. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  2994. radeon_ring_write(ring, ((ring->rptr_save_reg -
  2995. PACKET3_SET_UCONFIG_REG_START) >> 2));
  2996. radeon_ring_write(ring, next_rptr);
  2997. } else if (rdev->wb.enabled) {
  2998. next_rptr = ring->wptr + 5 + 4;
  2999. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3000. radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
  3001. radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
  3002. radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
  3003. radeon_ring_write(ring, next_rptr);
  3004. }
  3005. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  3006. }
  3007. control |= ib->length_dw |
  3008. (ib->vm ? (ib->vm->id << 24) : 0);
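/*
 * e.g. a 64-dword IB bound to VM id 3 ends up with
 * control = INDIRECT_BUFFER_VALID | 64 | (3 << 24).
 */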
  3009. radeon_ring_write(ring, header);
  3010. radeon_ring_write(ring,
  3011. #ifdef __BIG_ENDIAN
  3012. (2 << 0) |
  3013. #endif
  3014. (ib->gpu_addr & 0xFFFFFFFC));
  3015. radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  3016. radeon_ring_write(ring, control);
  3017. }
  3018. /**
  3019. * cik_ib_test - basic gfx ring IB test
  3020. *
  3021. * @rdev: radeon_device pointer
  3022. * @ring: radeon_ring structure holding ring information
  3023. *
  3024. * Allocate an IB and execute it on the gfx ring (CIK).
  3025. * Provides a basic gfx ring test to verify that IBs are working.
  3026. * Returns 0 on success, error on failure.
  3027. */
  3028. int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
  3029. {
  3030. struct radeon_ib ib;
  3031. uint32_t scratch;
  3032. uint32_t tmp = 0;
  3033. unsigned i;
  3034. int r;
  3035. r = radeon_scratch_get(rdev, &scratch);
  3036. if (r) {
  3037. DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
  3038. return r;
  3039. }
  3040. WREG32(scratch, 0xCAFEDEAD);
  3041. r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
  3042. if (r) {
  3043. DRM_ERROR("radeon: failed to get ib (%d).\n", r);
  3044. return r;
  3045. }
  3046. ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
  3047. ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
  3048. ib.ptr[2] = 0xDEADBEEF;
  3049. ib.length_dw = 3;
  3050. r = radeon_ib_schedule(rdev, &ib, NULL);
  3051. if (r) {
  3052. radeon_scratch_free(rdev, scratch);
  3053. radeon_ib_free(rdev, &ib);
  3054. DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
  3055. return r;
  3056. }
  3057. r = radeon_fence_wait(ib.fence, false);
  3058. if (r) {
3059. DRM_ERROR("radeon: fence wait failed (%d).\n", r);
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
3060. return r;
  3061. }
  3062. for (i = 0; i < rdev->usec_timeout; i++) {
  3063. tmp = RREG32(scratch);
  3064. if (tmp == 0xDEADBEEF)
  3065. break;
  3066. DRM_UDELAY(1);
  3067. }
  3068. if (i < rdev->usec_timeout) {
  3069. DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
  3070. } else {
  3071. DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
  3072. scratch, tmp);
  3073. r = -EINVAL;
  3074. }
  3075. radeon_scratch_free(rdev, scratch);
  3076. radeon_ib_free(rdev, &ib);
  3077. return r;
  3078. }
  3079. /*
  3080. * CP.
3081. * On CIK, gfx and compute now have independent command processors.
  3082. *
  3083. * GFX
  3084. * Gfx consists of a single ring and can process both gfx jobs and
  3085. * compute jobs. The gfx CP consists of three microengines (ME):
  3086. * PFP - Pre-Fetch Parser
  3087. * ME - Micro Engine
  3088. * CE - Constant Engine
  3089. * The PFP and ME make up what is considered the Drawing Engine (DE).
3090. * The CE is an asynchronous engine used for updating buffer descriptors
  3091. * used by the DE so that they can be loaded into cache in parallel
  3092. * while the DE is processing state update packets.
  3093. *
  3094. * Compute
  3095. * The compute CP consists of two microengines (ME):
  3096. * MEC1 - Compute MicroEngine 1
  3097. * MEC2 - Compute MicroEngine 2
  3098. * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
  3099. * The queues are exposed to userspace and are programmed directly
  3100. * by the compute runtime.
  3101. */
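/*
 * This works out to 2 MECs x 4 pipes x 8 queues = 64 compute queues on
 * Kaveri and 1 MEC x 4 pipes x 8 queues = 32 on Bonaire/Kabini (see
 * cik_mec_init() below).
 */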
  3102. /**
  3103. * cik_cp_gfx_enable - enable/disable the gfx CP MEs
  3104. *
  3105. * @rdev: radeon_device pointer
  3106. * @enable: enable or disable the MEs
  3107. *
  3108. * Halts or unhalts the gfx MEs.
  3109. */
  3110. static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
  3111. {
  3112. if (enable)
  3113. WREG32(CP_ME_CNTL, 0);
  3114. else {
  3115. WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
  3116. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3117. }
  3118. udelay(50);
  3119. }
  3120. /**
  3121. * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
  3122. *
  3123. * @rdev: radeon_device pointer
  3124. *
  3125. * Loads the gfx PFP, ME, and CE ucode.
  3126. * Returns 0 for success, -EINVAL if the ucode is not available.
  3127. */
  3128. static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
  3129. {
  3130. const __be32 *fw_data;
  3131. int i;
  3132. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
  3133. return -EINVAL;
  3134. cik_cp_gfx_enable(rdev, false);
  3135. /* PFP */
  3136. fw_data = (const __be32 *)rdev->pfp_fw->data;
  3137. WREG32(CP_PFP_UCODE_ADDR, 0);
  3138. for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
  3139. WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
  3140. WREG32(CP_PFP_UCODE_ADDR, 0);
  3141. /* CE */
  3142. fw_data = (const __be32 *)rdev->ce_fw->data;
  3143. WREG32(CP_CE_UCODE_ADDR, 0);
  3144. for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
  3145. WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
  3146. WREG32(CP_CE_UCODE_ADDR, 0);
  3147. /* ME */
  3148. fw_data = (const __be32 *)rdev->me_fw->data;
  3149. WREG32(CP_ME_RAM_WADDR, 0);
  3150. for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
  3151. WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
  3152. WREG32(CP_ME_RAM_WADDR, 0);
  3153. WREG32(CP_PFP_UCODE_ADDR, 0);
  3154. WREG32(CP_CE_UCODE_ADDR, 0);
  3155. WREG32(CP_ME_RAM_WADDR, 0);
  3156. WREG32(CP_ME_RAM_RADDR, 0);
  3157. return 0;
  3158. }
  3159. /**
  3160. * cik_cp_gfx_start - start the gfx ring
  3161. *
  3162. * @rdev: radeon_device pointer
  3163. *
  3164. * Enables the ring and loads the clear state context and other
  3165. * packets required to init the ring.
  3166. * Returns 0 for success, error for failure.
  3167. */
  3168. static int cik_cp_gfx_start(struct radeon_device *rdev)
  3169. {
  3170. struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3171. int r, i;
  3172. /* init the CP */
  3173. WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
  3174. WREG32(CP_ENDIAN_SWAP, 0);
  3175. WREG32(CP_DEVICE_ID, 1);
  3176. cik_cp_gfx_enable(rdev, true);
  3177. r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
  3178. if (r) {
  3179. DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
  3180. return r;
  3181. }
  3182. /* init the CE partitions. CE only used for gfx on CIK */
  3183. radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
  3184. radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
  3185. radeon_ring_write(ring, 0xc000);
  3186. radeon_ring_write(ring, 0xc000);
  3187. /* setup clear context state */
  3188. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3189. radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  3190. radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  3191. radeon_ring_write(ring, 0x80000000);
  3192. radeon_ring_write(ring, 0x80000000);
  3193. for (i = 0; i < cik_default_size; i++)
  3194. radeon_ring_write(ring, cik_default_state[i]);
  3195. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3196. radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  3197. /* set clear context state */
  3198. radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
  3199. radeon_ring_write(ring, 0);
  3200. radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  3201. radeon_ring_write(ring, 0x00000316);
  3202. radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
  3203. radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
  3204. radeon_ring_unlock_commit(rdev, ring);
  3205. return 0;
  3206. }
  3207. /**
  3208. * cik_cp_gfx_fini - stop the gfx ring
  3209. *
  3210. * @rdev: radeon_device pointer
  3211. *
  3212. * Stop the gfx ring and tear down the driver ring
  3213. * info.
  3214. */
  3215. static void cik_cp_gfx_fini(struct radeon_device *rdev)
  3216. {
  3217. cik_cp_gfx_enable(rdev, false);
  3218. radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3219. }
  3220. /**
  3221. * cik_cp_gfx_resume - setup the gfx ring buffer registers
  3222. *
  3223. * @rdev: radeon_device pointer
  3224. *
  3225. * Program the location and size of the gfx ring buffer
  3226. * and test it to make sure it's working.
  3227. * Returns 0 for success, error for failure.
  3228. */
  3229. static int cik_cp_gfx_resume(struct radeon_device *rdev)
  3230. {
  3231. struct radeon_ring *ring;
  3232. u32 tmp;
  3233. u32 rb_bufsz;
  3234. u64 rb_addr;
  3235. int r;
  3236. WREG32(CP_SEM_WAIT_TIMER, 0x0);
  3237. WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
  3238. /* Set the write pointer delay */
  3239. WREG32(CP_RB_WPTR_DELAY, 0);
  3240. /* set the RB to use vmid 0 */
  3241. WREG32(CP_RB_VMID, 0);
  3242. WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
  3243. /* ring 0 - compute and gfx */
  3244. /* Set ring buffer size */
  3245. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3246. rb_bufsz = order_base_2(ring->ring_size / 8);
  3247. tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
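/*
 * e.g. assuming a 1 MiB ring (ring_size in bytes) and a 4 KiB
 * RADEON_GPU_PAGE_SIZE, rb_bufsz = order_base_2(1048576 / 8) = 17 and the
 * term shifted left by 8 is order_base_2(4096 / 8) = 9.
 */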
  3248. #ifdef __BIG_ENDIAN
  3249. tmp |= BUF_SWAP_32BIT;
  3250. #endif
  3251. WREG32(CP_RB0_CNTL, tmp);
  3252. /* Initialize the ring buffer's read and write pointers */
  3253. WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
  3254. ring->wptr = 0;
  3255. WREG32(CP_RB0_WPTR, ring->wptr);
3256. /* set the wb address whether it's enabled or not */
  3257. WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
  3258. WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
  3259. /* scratch register shadowing is no longer supported */
  3260. WREG32(SCRATCH_UMSK, 0);
  3261. if (!rdev->wb.enabled)
  3262. tmp |= RB_NO_UPDATE;
  3263. mdelay(1);
  3264. WREG32(CP_RB0_CNTL, tmp);
  3265. rb_addr = ring->gpu_addr >> 8;
  3266. WREG32(CP_RB0_BASE, rb_addr);
  3267. WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
  3268. ring->rptr = RREG32(CP_RB0_RPTR);
  3269. /* start the ring */
  3270. cik_cp_gfx_start(rdev);
  3271. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
  3272. r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3273. if (r) {
  3274. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3275. return r;
  3276. }
  3277. return 0;
  3278. }
  3279. u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
  3280. struct radeon_ring *ring)
  3281. {
  3282. u32 rptr;
  3283. if (rdev->wb.enabled) {
  3284. rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
  3285. } else {
  3286. mutex_lock(&rdev->srbm_mutex);
  3287. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3288. rptr = RREG32(CP_HQD_PQ_RPTR);
  3289. cik_srbm_select(rdev, 0, 0, 0, 0);
  3290. mutex_unlock(&rdev->srbm_mutex);
  3291. }
  3292. return rptr;
  3293. }
  3294. u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
  3295. struct radeon_ring *ring)
  3296. {
  3297. u32 wptr;
  3298. if (rdev->wb.enabled) {
  3299. wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
  3300. } else {
  3301. mutex_lock(&rdev->srbm_mutex);
  3302. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3303. wptr = RREG32(CP_HQD_PQ_WPTR);
  3304. cik_srbm_select(rdev, 0, 0, 0, 0);
  3305. mutex_unlock(&rdev->srbm_mutex);
  3306. }
  3307. return wptr;
  3308. }
  3309. void cik_compute_ring_set_wptr(struct radeon_device *rdev,
  3310. struct radeon_ring *ring)
  3311. {
  3312. rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
  3313. WDOORBELL32(ring->doorbell_offset, ring->wptr);
  3314. }
  3315. /**
  3316. * cik_cp_compute_enable - enable/disable the compute CP MEs
  3317. *
  3318. * @rdev: radeon_device pointer
  3319. * @enable: enable or disable the MEs
  3320. *
  3321. * Halts or unhalts the compute MEs.
  3322. */
  3323. static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
  3324. {
  3325. if (enable)
  3326. WREG32(CP_MEC_CNTL, 0);
  3327. else
  3328. WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
  3329. udelay(50);
  3330. }
  3331. /**
  3332. * cik_cp_compute_load_microcode - load the compute CP ME ucode
  3333. *
  3334. * @rdev: radeon_device pointer
  3335. *
  3336. * Loads the compute MEC1&2 ucode.
  3337. * Returns 0 for success, -EINVAL if the ucode is not available.
  3338. */
  3339. static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
  3340. {
  3341. const __be32 *fw_data;
  3342. int i;
  3343. if (!rdev->mec_fw)
  3344. return -EINVAL;
  3345. cik_cp_compute_enable(rdev, false);
  3346. /* MEC1 */
  3347. fw_data = (const __be32 *)rdev->mec_fw->data;
  3348. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  3349. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  3350. WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
  3351. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  3352. if (rdev->family == CHIP_KAVERI) {
  3353. /* MEC2 */
  3354. fw_data = (const __be32 *)rdev->mec_fw->data;
  3355. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  3356. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  3357. WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
  3358. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  3359. }
  3360. return 0;
  3361. }
  3362. /**
  3363. * cik_cp_compute_start - start the compute queues
  3364. *
  3365. * @rdev: radeon_device pointer
  3366. *
  3367. * Enable the compute queues.
  3368. * Returns 0 for success, error for failure.
  3369. */
  3370. static int cik_cp_compute_start(struct radeon_device *rdev)
  3371. {
  3372. cik_cp_compute_enable(rdev, true);
  3373. return 0;
  3374. }
  3375. /**
  3376. * cik_cp_compute_fini - stop the compute queues
  3377. *
  3378. * @rdev: radeon_device pointer
  3379. *
  3380. * Stop the compute queues and tear down the driver queue
  3381. * info.
  3382. */
  3383. static void cik_cp_compute_fini(struct radeon_device *rdev)
  3384. {
  3385. int i, idx, r;
  3386. cik_cp_compute_enable(rdev, false);
  3387. for (i = 0; i < 2; i++) {
  3388. if (i == 0)
  3389. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  3390. else
  3391. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  3392. if (rdev->ring[idx].mqd_obj) {
  3393. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  3394. if (unlikely(r != 0))
  3395. dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
  3396. radeon_bo_unpin(rdev->ring[idx].mqd_obj);
  3397. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  3398. radeon_bo_unref(&rdev->ring[idx].mqd_obj);
  3399. rdev->ring[idx].mqd_obj = NULL;
  3400. }
  3401. }
  3402. }
  3403. static void cik_mec_fini(struct radeon_device *rdev)
  3404. {
  3405. int r;
  3406. if (rdev->mec.hpd_eop_obj) {
  3407. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  3408. if (unlikely(r != 0))
  3409. dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
  3410. radeon_bo_unpin(rdev->mec.hpd_eop_obj);
  3411. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  3412. radeon_bo_unref(&rdev->mec.hpd_eop_obj);
  3413. rdev->mec.hpd_eop_obj = NULL;
  3414. }
  3415. }
  3416. #define MEC_HPD_SIZE 2048
  3417. static int cik_mec_init(struct radeon_device *rdev)
  3418. {
  3419. int r;
  3420. u32 *hpd;
  3421. /*
  3422. * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
  3423. * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
  3424. */
  3425. if (rdev->family == CHIP_KAVERI)
  3426. rdev->mec.num_mec = 2;
  3427. else
  3428. rdev->mec.num_mec = 1;
  3429. rdev->mec.num_pipe = 4;
  3430. rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
  3431. if (rdev->mec.hpd_eop_obj == NULL) {
  3432. r = radeon_bo_create(rdev,
3433. rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
  3434. PAGE_SIZE, true,
  3435. RADEON_GEM_DOMAIN_GTT, NULL,
  3436. &rdev->mec.hpd_eop_obj);
  3437. if (r) {
  3438. dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
  3439. return r;
  3440. }
  3441. }
  3442. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  3443. if (unlikely(r != 0)) {
  3444. cik_mec_fini(rdev);
  3445. return r;
  3446. }
  3447. r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
  3448. &rdev->mec.hpd_eop_gpu_addr);
  3449. if (r) {
  3450. dev_warn(rdev->dev, "(%d) pin HDP EOP bo failed\n", r);
  3451. cik_mec_fini(rdev);
  3452. return r;
  3453. }
  3454. r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
  3455. if (r) {
  3456. dev_warn(rdev->dev, "(%d) map HDP EOP bo failed\n", r);
  3457. cik_mec_fini(rdev);
  3458. return r;
  3459. }
  3460. /* clear memory. Not sure if this is required or not */
3461. memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
  3462. radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
  3463. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  3464. return 0;
  3465. }
  3466. struct hqd_registers
  3467. {
  3468. u32 cp_mqd_base_addr;
  3469. u32 cp_mqd_base_addr_hi;
  3470. u32 cp_hqd_active;
  3471. u32 cp_hqd_vmid;
  3472. u32 cp_hqd_persistent_state;
  3473. u32 cp_hqd_pipe_priority;
  3474. u32 cp_hqd_queue_priority;
  3475. u32 cp_hqd_quantum;
  3476. u32 cp_hqd_pq_base;
  3477. u32 cp_hqd_pq_base_hi;
  3478. u32 cp_hqd_pq_rptr;
  3479. u32 cp_hqd_pq_rptr_report_addr;
  3480. u32 cp_hqd_pq_rptr_report_addr_hi;
  3481. u32 cp_hqd_pq_wptr_poll_addr;
  3482. u32 cp_hqd_pq_wptr_poll_addr_hi;
  3483. u32 cp_hqd_pq_doorbell_control;
  3484. u32 cp_hqd_pq_wptr;
  3485. u32 cp_hqd_pq_control;
  3486. u32 cp_hqd_ib_base_addr;
  3487. u32 cp_hqd_ib_base_addr_hi;
  3488. u32 cp_hqd_ib_rptr;
  3489. u32 cp_hqd_ib_control;
  3490. u32 cp_hqd_iq_timer;
  3491. u32 cp_hqd_iq_rptr;
  3492. u32 cp_hqd_dequeue_request;
  3493. u32 cp_hqd_dma_offload;
  3494. u32 cp_hqd_sema_cmd;
  3495. u32 cp_hqd_msg_type;
  3496. u32 cp_hqd_atomic0_preop_lo;
  3497. u32 cp_hqd_atomic0_preop_hi;
  3498. u32 cp_hqd_atomic1_preop_lo;
  3499. u32 cp_hqd_atomic1_preop_hi;
  3500. u32 cp_hqd_hq_scheduler0;
  3501. u32 cp_hqd_hq_scheduler1;
  3502. u32 cp_mqd_control;
  3503. };
  3504. struct bonaire_mqd
  3505. {
  3506. u32 header;
  3507. u32 dispatch_initiator;
  3508. u32 dimensions[3];
  3509. u32 start_idx[3];
  3510. u32 num_threads[3];
  3511. u32 pipeline_stat_enable;
  3512. u32 perf_counter_enable;
  3513. u32 pgm[2];
  3514. u32 tba[2];
  3515. u32 tma[2];
  3516. u32 pgm_rsrc[2];
  3517. u32 vmid;
  3518. u32 resource_limits;
  3519. u32 static_thread_mgmt01[2];
  3520. u32 tmp_ring_size;
  3521. u32 static_thread_mgmt23[2];
  3522. u32 restart[3];
  3523. u32 thread_trace_enable;
  3524. u32 reserved1;
  3525. u32 user_data[16];
  3526. u32 vgtcs_invoke_count[2];
  3527. struct hqd_registers queue_state;
  3528. u32 dequeue_cntr;
  3529. u32 interrupt_queue[64];
  3530. };
  3531. /**
  3532. * cik_cp_compute_resume - setup the compute queue registers
  3533. *
  3534. * @rdev: radeon_device pointer
  3535. *
  3536. * Program the compute queues and test them to make sure they
  3537. * are working.
  3538. * Returns 0 for success, error for failure.
  3539. */
  3540. static int cik_cp_compute_resume(struct radeon_device *rdev)
  3541. {
  3542. int r, i, idx;
  3543. u32 tmp;
  3544. bool use_doorbell = true;
  3545. u64 hqd_gpu_addr;
  3546. u64 mqd_gpu_addr;
  3547. u64 eop_gpu_addr;
  3548. u64 wb_gpu_addr;
  3549. u32 *buf;
  3550. struct bonaire_mqd *mqd;
  3551. r = cik_cp_compute_start(rdev);
  3552. if (r)
  3553. return r;
  3554. /* fix up chicken bits */
  3555. tmp = RREG32(CP_CPF_DEBUG);
  3556. tmp |= (1 << 23);
  3557. WREG32(CP_CPF_DEBUG, tmp);
  3558. /* init the pipes */
  3559. mutex_lock(&rdev->srbm_mutex);
  3560. for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
  3561. int me = (i < 4) ? 1 : 2;
  3562. int pipe = (i < 4) ? i : (i - 4);
  3563. eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
  3564. cik_srbm_select(rdev, me, pipe, 0, 0);
  3565. /* write the EOP addr */
  3566. WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
  3567. WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
  3568. /* set the VMID assigned */
  3569. WREG32(CP_HPD_EOP_VMID, 0);
  3570. /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
  3571. tmp = RREG32(CP_HPD_EOP_CONTROL);
  3572. tmp &= ~EOP_SIZE_MASK;
  3573. tmp |= order_base_2(MEC_HPD_SIZE / 8);
  3574. WREG32(CP_HPD_EOP_CONTROL, tmp);
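/*
 * With MEC_HPD_SIZE = 2048 this programs order_base_2(2048 / 8) = 8,
 * i.e. 2^(8+1) = 512 dwords = 2048 bytes, which is exactly MEC_HPD_SIZE.
 */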
  3575. }
  3576. cik_srbm_select(rdev, 0, 0, 0, 0);
  3577. mutex_unlock(&rdev->srbm_mutex);
  3578. /* init the queues. Just two for now. */
  3579. for (i = 0; i < 2; i++) {
  3580. if (i == 0)
  3581. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  3582. else
  3583. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  3584. if (rdev->ring[idx].mqd_obj == NULL) {
  3585. r = radeon_bo_create(rdev,
  3586. sizeof(struct bonaire_mqd),
  3587. PAGE_SIZE, true,
  3588. RADEON_GEM_DOMAIN_GTT, NULL,
  3589. &rdev->ring[idx].mqd_obj);
  3590. if (r) {
  3591. dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
  3592. return r;
  3593. }
  3594. }
  3595. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  3596. if (unlikely(r != 0)) {
  3597. cik_cp_compute_fini(rdev);
  3598. return r;
  3599. }
  3600. r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
  3601. &mqd_gpu_addr);
  3602. if (r) {
  3603. dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
  3604. cik_cp_compute_fini(rdev);
  3605. return r;
  3606. }
  3607. r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
  3608. if (r) {
  3609. dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
  3610. cik_cp_compute_fini(rdev);
  3611. return r;
  3612. }
  3613. /* doorbell offset */
  3614. rdev->ring[idx].doorbell_offset =
  3615. (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
  3616. /* init the mqd struct */
  3617. memset(buf, 0, sizeof(struct bonaire_mqd));
  3618. mqd = (struct bonaire_mqd *)buf;
  3619. mqd->header = 0xC0310800;
  3620. mqd->static_thread_mgmt01[0] = 0xffffffff;
  3621. mqd->static_thread_mgmt01[1] = 0xffffffff;
  3622. mqd->static_thread_mgmt23[0] = 0xffffffff;
  3623. mqd->static_thread_mgmt23[1] = 0xffffffff;
  3624. mutex_lock(&rdev->srbm_mutex);
  3625. cik_srbm_select(rdev, rdev->ring[idx].me,
  3626. rdev->ring[idx].pipe,
  3627. rdev->ring[idx].queue, 0);
  3628. /* disable wptr polling */
  3629. tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
  3630. tmp &= ~WPTR_POLL_EN;
  3631. WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
  3632. /* enable doorbell? */
  3633. mqd->queue_state.cp_hqd_pq_doorbell_control =
  3634. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  3635. if (use_doorbell)
  3636. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  3637. else
  3638. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
  3639. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  3640. mqd->queue_state.cp_hqd_pq_doorbell_control);
  3641. /* disable the queue if it's active */
  3642. mqd->queue_state.cp_hqd_dequeue_request = 0;
  3643. mqd->queue_state.cp_hqd_pq_rptr = 0;
3644. mqd->queue_state.cp_hqd_pq_wptr = 0;
  3645. if (RREG32(CP_HQD_ACTIVE) & 1) {
  3646. WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
  3647. for (i = 0; i < rdev->usec_timeout; i++) {
  3648. if (!(RREG32(CP_HQD_ACTIVE) & 1))
  3649. break;
  3650. udelay(1);
  3651. }
  3652. WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
  3653. WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
  3654. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  3655. }
  3656. /* set the pointer to the MQD */
  3657. mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
  3658. mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
  3659. WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
  3660. WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
  3661. /* set MQD vmid to 0 */
  3662. mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
  3663. mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
  3664. WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
  3665. /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
  3666. hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
  3667. mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
  3668. mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
  3669. WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
  3670. WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
  3671. /* set up the HQD, this is similar to CP_RB0_CNTL */
  3672. mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
  3673. mqd->queue_state.cp_hqd_pq_control &=
  3674. ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
  3675. mqd->queue_state.cp_hqd_pq_control |=
  3676. order_base_2(rdev->ring[idx].ring_size / 8);
  3677. mqd->queue_state.cp_hqd_pq_control |=
  3678. (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
  3679. #ifdef __BIG_ENDIAN
  3680. mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
  3681. #endif
  3682. mqd->queue_state.cp_hqd_pq_control &=
  3683. ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
  3684. mqd->queue_state.cp_hqd_pq_control |=
  3685. PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
  3686. WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
  3687. /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
  3688. if (i == 0)
  3689. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
  3690. else
  3691. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
  3692. mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
  3693. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
  3694. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
  3695. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
  3696. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3697. /* set the wb address whether it's enabled or not */
  3698. if (i == 0)
  3699. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
  3700. else
  3701. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
  3702. mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
  3703. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
  3704. upper_32_bits(wb_gpu_addr) & 0xffff;
  3705. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
  3706. mqd->queue_state.cp_hqd_pq_rptr_report_addr);
  3707. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
  3708. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
  3709. /* enable the doorbell if requested */
  3710. if (use_doorbell) {
  3711. mqd->queue_state.cp_hqd_pq_doorbell_control =
  3712. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  3713. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
  3714. mqd->queue_state.cp_hqd_pq_doorbell_control |=
  3715. DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
  3716. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  3717. mqd->queue_state.cp_hqd_pq_doorbell_control &=
  3718. ~(DOORBELL_SOURCE | DOORBELL_HIT);
  3719. } else {
  3720. mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
  3721. }
  3722. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  3723. mqd->queue_state.cp_hqd_pq_doorbell_control);
  3724. /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
  3725. rdev->ring[idx].wptr = 0;
  3726. mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
  3727. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  3728. rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
  3729. mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
  3730. /* set the vmid for the queue */
  3731. mqd->queue_state.cp_hqd_vmid = 0;
  3732. WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
  3733. /* activate the queue */
  3734. mqd->queue_state.cp_hqd_active = 1;
  3735. WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
  3736. cik_srbm_select(rdev, 0, 0, 0, 0);
  3737. mutex_unlock(&rdev->srbm_mutex);
  3738. radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
  3739. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  3740. rdev->ring[idx].ready = true;
  3741. r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
  3742. if (r)
  3743. rdev->ring[idx].ready = false;
  3744. }
  3745. return 0;
  3746. }
  3747. static void cik_cp_enable(struct radeon_device *rdev, bool enable)
  3748. {
  3749. cik_cp_gfx_enable(rdev, enable);
  3750. cik_cp_compute_enable(rdev, enable);
  3751. }
  3752. static int cik_cp_load_microcode(struct radeon_device *rdev)
  3753. {
  3754. int r;
  3755. r = cik_cp_gfx_load_microcode(rdev);
  3756. if (r)
  3757. return r;
  3758. r = cik_cp_compute_load_microcode(rdev);
  3759. if (r)
  3760. return r;
  3761. return 0;
  3762. }
  3763. static void cik_cp_fini(struct radeon_device *rdev)
  3764. {
  3765. cik_cp_gfx_fini(rdev);
  3766. cik_cp_compute_fini(rdev);
  3767. }
  3768. static int cik_cp_resume(struct radeon_device *rdev)
  3769. {
  3770. int r;
  3771. cik_enable_gui_idle_interrupt(rdev, false);
  3772. r = cik_cp_load_microcode(rdev);
  3773. if (r)
  3774. return r;
  3775. r = cik_cp_gfx_resume(rdev);
  3776. if (r)
  3777. return r;
  3778. r = cik_cp_compute_resume(rdev);
  3779. if (r)
  3780. return r;
  3781. cik_enable_gui_idle_interrupt(rdev, true);
  3782. return 0;
  3783. }
  3784. static void cik_print_gpu_status_regs(struct radeon_device *rdev)
  3785. {
  3786. dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
  3787. RREG32(GRBM_STATUS));
  3788. dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
  3789. RREG32(GRBM_STATUS2));
  3790. dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
  3791. RREG32(GRBM_STATUS_SE0));
  3792. dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
  3793. RREG32(GRBM_STATUS_SE1));
  3794. dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
  3795. RREG32(GRBM_STATUS_SE2));
  3796. dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
  3797. RREG32(GRBM_STATUS_SE3));
  3798. dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
  3799. RREG32(SRBM_STATUS));
  3800. dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
  3801. RREG32(SRBM_STATUS2));
  3802. dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
  3803. RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
  3804. dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
  3805. RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
  3806. dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
  3807. dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
  3808. RREG32(CP_STALLED_STAT1));
  3809. dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
  3810. RREG32(CP_STALLED_STAT2));
  3811. dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
  3812. RREG32(CP_STALLED_STAT3));
  3813. dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
  3814. RREG32(CP_CPF_BUSY_STAT));
  3815. dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
  3816. RREG32(CP_CPF_STALLED_STAT1));
  3817. dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
  3818. dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
  3819. dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
  3820. RREG32(CP_CPC_STALLED_STAT1));
  3821. dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
  3822. }
  3823. /**
  3824. * cik_gpu_check_soft_reset - check which blocks are busy
  3825. *
  3826. * @rdev: radeon_device pointer
  3827. *
  3828. * Check which blocks are busy and return the relevant reset
  3829. * mask to be used by cik_gpu_soft_reset().
  3830. * Returns a mask of the blocks to be reset.
  3831. */
  3832. u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
  3833. {
  3834. u32 reset_mask = 0;
  3835. u32 tmp;
  3836. /* GRBM_STATUS */
  3837. tmp = RREG32(GRBM_STATUS);
  3838. if (tmp & (PA_BUSY | SC_BUSY |
  3839. BCI_BUSY | SX_BUSY |
  3840. TA_BUSY | VGT_BUSY |
  3841. DB_BUSY | CB_BUSY |
  3842. GDS_BUSY | SPI_BUSY |
  3843. IA_BUSY | IA_BUSY_NO_DMA))
  3844. reset_mask |= RADEON_RESET_GFX;
  3845. if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
  3846. reset_mask |= RADEON_RESET_CP;
  3847. /* GRBM_STATUS2 */
  3848. tmp = RREG32(GRBM_STATUS2);
  3849. if (tmp & RLC_BUSY)
  3850. reset_mask |= RADEON_RESET_RLC;
  3851. /* SDMA0_STATUS_REG */
  3852. tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
  3853. if (!(tmp & SDMA_IDLE))
  3854. reset_mask |= RADEON_RESET_DMA;
  3855. /* SDMA1_STATUS_REG */
  3856. tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
  3857. if (!(tmp & SDMA_IDLE))
  3858. reset_mask |= RADEON_RESET_DMA1;
  3859. /* SRBM_STATUS2 */
  3860. tmp = RREG32(SRBM_STATUS2);
  3861. if (tmp & SDMA_BUSY)
  3862. reset_mask |= RADEON_RESET_DMA;
  3863. if (tmp & SDMA1_BUSY)
  3864. reset_mask |= RADEON_RESET_DMA1;
  3865. /* SRBM_STATUS */
  3866. tmp = RREG32(SRBM_STATUS);
  3867. if (tmp & IH_BUSY)
  3868. reset_mask |= RADEON_RESET_IH;
  3869. if (tmp & SEM_BUSY)
  3870. reset_mask |= RADEON_RESET_SEM;
  3871. if (tmp & GRBM_RQ_PENDING)
  3872. reset_mask |= RADEON_RESET_GRBM;
  3873. if (tmp & VMC_BUSY)
  3874. reset_mask |= RADEON_RESET_VMC;
  3875. if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
  3876. MCC_BUSY | MCD_BUSY))
  3877. reset_mask |= RADEON_RESET_MC;
  3878. if (evergreen_is_display_hung(rdev))
  3879. reset_mask |= RADEON_RESET_DISPLAY;
3880. /* Skip MC reset as it's most likely not hung, just busy */
  3881. if (reset_mask & RADEON_RESET_MC) {
  3882. DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
  3883. reset_mask &= ~RADEON_RESET_MC;
  3884. }
  3885. return reset_mask;
  3886. }
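/*
 * Usage sketch (assumed caller, mirrors cik_gfx_is_lockup() below):
 *
 *   u32 mask = cik_gpu_check_soft_reset(rdev);
 *   if (mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
 *           ...treat the gfx/compute engines as hung and reset them...
 */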
  3887. /**
  3888. * cik_gpu_soft_reset - soft reset GPU
  3889. *
  3890. * @rdev: radeon_device pointer
  3891. * @reset_mask: mask of which blocks to reset
  3892. *
  3893. * Soft reset the blocks specified in @reset_mask.
  3894. */
  3895. static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
  3896. {
  3897. struct evergreen_mc_save save;
  3898. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  3899. u32 tmp;
  3900. if (reset_mask == 0)
  3901. return;
  3902. dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
  3903. cik_print_gpu_status_regs(rdev);
  3904. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  3905. RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
  3906. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  3907. RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
  3908. /* stop the rlc */
  3909. cik_rlc_stop(rdev);
  3910. /* Disable GFX parsing/prefetching */
  3911. WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
  3912. /* Disable MEC parsing/prefetching */
  3913. WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
  3914. if (reset_mask & RADEON_RESET_DMA) {
  3915. /* sdma0 */
  3916. tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
  3917. tmp |= SDMA_HALT;
  3918. WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  3919. }
  3920. if (reset_mask & RADEON_RESET_DMA1) {
  3921. /* sdma1 */
  3922. tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
  3923. tmp |= SDMA_HALT;
  3924. WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  3925. }
  3926. evergreen_mc_stop(rdev, &save);
  3927. if (evergreen_mc_wait_for_idle(rdev)) {
3928. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  3929. }
  3930. if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
  3931. grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
  3932. if (reset_mask & RADEON_RESET_CP) {
  3933. grbm_soft_reset |= SOFT_RESET_CP;
  3934. srbm_soft_reset |= SOFT_RESET_GRBM;
  3935. }
  3936. if (reset_mask & RADEON_RESET_DMA)
  3937. srbm_soft_reset |= SOFT_RESET_SDMA;
  3938. if (reset_mask & RADEON_RESET_DMA1)
  3939. srbm_soft_reset |= SOFT_RESET_SDMA1;
  3940. if (reset_mask & RADEON_RESET_DISPLAY)
  3941. srbm_soft_reset |= SOFT_RESET_DC;
  3942. if (reset_mask & RADEON_RESET_RLC)
  3943. grbm_soft_reset |= SOFT_RESET_RLC;
  3944. if (reset_mask & RADEON_RESET_SEM)
  3945. srbm_soft_reset |= SOFT_RESET_SEM;
  3946. if (reset_mask & RADEON_RESET_IH)
  3947. srbm_soft_reset |= SOFT_RESET_IH;
  3948. if (reset_mask & RADEON_RESET_GRBM)
  3949. srbm_soft_reset |= SOFT_RESET_GRBM;
  3950. if (reset_mask & RADEON_RESET_VMC)
  3951. srbm_soft_reset |= SOFT_RESET_VMC;
  3952. if (!(rdev->flags & RADEON_IS_IGP)) {
  3953. if (reset_mask & RADEON_RESET_MC)
  3954. srbm_soft_reset |= SOFT_RESET_MC;
  3955. }
  3956. if (grbm_soft_reset) {
  3957. tmp = RREG32(GRBM_SOFT_RESET);
  3958. tmp |= grbm_soft_reset;
  3959. dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
  3960. WREG32(GRBM_SOFT_RESET, tmp);
  3961. tmp = RREG32(GRBM_SOFT_RESET);
  3962. udelay(50);
  3963. tmp &= ~grbm_soft_reset;
  3964. WREG32(GRBM_SOFT_RESET, tmp);
  3965. tmp = RREG32(GRBM_SOFT_RESET);
  3966. }
  3967. if (srbm_soft_reset) {
  3968. tmp = RREG32(SRBM_SOFT_RESET);
  3969. tmp |= srbm_soft_reset;
  3970. dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
  3971. WREG32(SRBM_SOFT_RESET, tmp);
  3972. tmp = RREG32(SRBM_SOFT_RESET);
  3973. udelay(50);
  3974. tmp &= ~srbm_soft_reset;
  3975. WREG32(SRBM_SOFT_RESET, tmp);
  3976. tmp = RREG32(SRBM_SOFT_RESET);
  3977. }
  3978. /* Wait a little for things to settle down */
  3979. udelay(50);
  3980. evergreen_mc_resume(rdev, &save);
  3981. udelay(50);
  3982. cik_print_gpu_status_regs(rdev);
  3983. }
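/*
 * Note on the reset strobe above (descriptive, not from the original
 * comments): each SOFT_RESET register is written with the reset bits
 * set, read back (a posting read so the write lands before the delay
 * starts), held for ~50us, then written again with the bits cleared
 * and read back once more.
 */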
  3984. /**
  3985. * cik_asic_reset - soft reset GPU
  3986. *
  3987. * @rdev: radeon_device pointer
  3988. *
  3989. * Look up which blocks are hung and attempt
  3990. * to reset them.
  3991. * Returns 0 for success.
  3992. */
  3993. int cik_asic_reset(struct radeon_device *rdev)
  3994. {
  3995. u32 reset_mask;
  3996. reset_mask = cik_gpu_check_soft_reset(rdev);
  3997. if (reset_mask)
  3998. r600_set_bios_scratch_engine_hung(rdev, true);
  3999. cik_gpu_soft_reset(rdev, reset_mask);
  4000. reset_mask = cik_gpu_check_soft_reset(rdev);
  4001. if (!reset_mask)
  4002. r600_set_bios_scratch_engine_hung(rdev, false);
  4003. return 0;
  4004. }
  4005. /**
  4006. * cik_gfx_is_lockup - check if the 3D engine is locked up
  4007. *
  4008. * @rdev: radeon_device pointer
  4009. * @ring: radeon_ring structure holding ring information
  4010. *
  4011. * Check if the 3D engine is locked up (CIK).
4012. * Returns true if the engine is locked up, false if not.
  4013. */
  4014. bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  4015. {
  4016. u32 reset_mask = cik_gpu_check_soft_reset(rdev);
  4017. if (!(reset_mask & (RADEON_RESET_GFX |
  4018. RADEON_RESET_COMPUTE |
  4019. RADEON_RESET_CP))) {
  4020. radeon_ring_lockup_update(ring);
  4021. return false;
  4022. }
  4023. /* force CP activities */
  4024. radeon_ring_force_activity(rdev, ring);
  4025. return radeon_ring_test_lockup(rdev, ring);
  4026. }
  4027. /* MC */
  4028. /**
  4029. * cik_mc_program - program the GPU memory controller
  4030. *
  4031. * @rdev: radeon_device pointer
  4032. *
  4033. * Set the location of vram, gart, and AGP in the GPU's
  4034. * physical address space (CIK).
  4035. */
  4036. static void cik_mc_program(struct radeon_device *rdev)
  4037. {
  4038. struct evergreen_mc_save save;
  4039. u32 tmp;
  4040. int i, j;
  4041. /* Initialize HDP */
  4042. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  4043. WREG32((0x2c14 + j), 0x00000000);
  4044. WREG32((0x2c18 + j), 0x00000000);
  4045. WREG32((0x2c1c + j), 0x00000000);
  4046. WREG32((0x2c20 + j), 0x00000000);
  4047. WREG32((0x2c24 + j), 0x00000000);
  4048. }
  4049. WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
  4050. evergreen_mc_stop(rdev, &save);
  4051. if (radeon_mc_wait_for_idle(rdev)) {
4052. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4053. }
  4054. /* Lockout access through VGA aperture*/
  4055. WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
  4056. /* Update configuration */
  4057. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
  4058. rdev->mc.vram_start >> 12);
  4059. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  4060. rdev->mc.vram_end >> 12);
  4061. WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
  4062. rdev->vram_scratch.gpu_addr >> 12);
  4063. tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
  4064. tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
  4065. WREG32(MC_VM_FB_LOCATION, tmp);
  4066. /* XXX double check these! */
  4067. WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
  4068. WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
  4069. WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
  4070. WREG32(MC_VM_AGP_BASE, 0);
  4071. WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
  4072. WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
  4073. if (radeon_mc_wait_for_idle(rdev)) {
4074. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4075. }
  4076. evergreen_mc_resume(rdev, &save);
  4077. /* we need to own VRAM, so turn off the VGA renderer here
  4078. * to stop it overwriting our objects */
  4079. rv515_vga_render_disable(rdev);
  4080. }
  4081. /**
  4082. * cik_mc_init - initialize the memory controller driver params
  4083. *
  4084. * @rdev: radeon_device pointer
  4085. *
  4086. * Look up the amount of vram, vram width, and decide how to place
  4087. * vram and gart within the GPU's physical address space (CIK).
  4088. * Returns 0 for success.
  4089. */
  4090. static int cik_mc_init(struct radeon_device *rdev)
  4091. {
  4092. u32 tmp;
  4093. int chansize, numchan;
4094. /* Get VRAM information */
  4095. rdev->mc.vram_is_ddr = true;
  4096. tmp = RREG32(MC_ARB_RAMCFG);
  4097. if (tmp & CHANSIZE_MASK) {
  4098. chansize = 64;
  4099. } else {
  4100. chansize = 32;
  4101. }
  4102. tmp = RREG32(MC_SHARED_CHMAP);
  4103. switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
  4104. case 0:
  4105. default:
  4106. numchan = 1;
  4107. break;
  4108. case 1:
  4109. numchan = 2;
  4110. break;
  4111. case 2:
  4112. numchan = 4;
  4113. break;
  4114. case 3:
  4115. numchan = 8;
  4116. break;
  4117. case 4:
  4118. numchan = 3;
  4119. break;
  4120. case 5:
  4121. numchan = 6;
  4122. break;
  4123. case 6:
  4124. numchan = 10;
  4125. break;
  4126. case 7:
  4127. numchan = 12;
  4128. break;
  4129. case 8:
  4130. numchan = 16;
  4131. break;
  4132. }
  4133. rdev->mc.vram_width = numchan * chansize;
4134. /* Could aper size report 0? */
  4135. rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
  4136. rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4137. /* size in MB on CIK */
  4138. rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
  4139. rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
  4140. rdev->mc.visible_vram_size = rdev->mc.aper_size;
  4141. si_vram_gtt_location(rdev, &rdev->mc);
  4142. radeon_update_bandwidth_info(rdev);
  4143. return 0;
  4144. }
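/*
 * Worked example (values assumed): a board reporting NOOFCHAN = 3 with
 * 64-bit channels gives vram_width = 8 * 64 = 512 bits, and a
 * CONFIG_MEMSIZE readback of 0x800 (MB) gives mc_vram_size = 2 GB.
 */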
  4145. /*
  4146. * GART
  4147. * VMID 0 is the physical GPU addresses as used by the kernel.
  4148. * VMIDs 1-15 are used for userspace clients and are handled
  4149. * by the radeon vm/hsa code.
  4150. */
  4151. /**
  4152. * cik_pcie_gart_tlb_flush - gart tlb flush callback
  4153. *
  4154. * @rdev: radeon_device pointer
  4155. *
  4156. * Flush the TLB for the VMID 0 page table (CIK).
  4157. */
  4158. void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
  4159. {
  4160. /* flush hdp cache */
  4161. WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
  4162. /* bits 0-15 are the VM contexts0-15 */
  4163. WREG32(VM_INVALIDATE_REQUEST, 0x1);
  4164. }
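/*
 * Illustrative: VM_INVALIDATE_REQUEST takes a per-VMID bitmask, so 0x1
 * flushes only VMID 0 here, while e.g. (1 << 5) would flush VMID 5;
 * cik_vm_flush() below writes 1 << vm->id for the same reason.
 */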
  4165. /**
  4166. * cik_pcie_gart_enable - gart enable
  4167. *
  4168. * @rdev: radeon_device pointer
  4169. *
  4170. * This sets up the TLBs, programs the page tables for VMID0,
  4171. * sets up the hw for VMIDs 1-15 which are allocated on
  4172. * demand, and sets up the global locations for the LDS, GDS,
  4173. * and GPUVM for FSA64 clients (CIK).
  4174. * Returns 0 for success, errors for failure.
  4175. */
  4176. static int cik_pcie_gart_enable(struct radeon_device *rdev)
  4177. {
  4178. int r, i;
  4179. if (rdev->gart.robj == NULL) {
  4180. dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
  4181. return -EINVAL;
  4182. }
  4183. r = radeon_gart_table_vram_pin(rdev);
  4184. if (r)
  4185. return r;
  4186. radeon_gart_restore(rdev);
  4187. /* Setup TLB control */
  4188. WREG32(MC_VM_MX_L1_TLB_CNTL,
  4189. (0xA << 7) |
  4190. ENABLE_L1_TLB |
  4191. SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  4192. ENABLE_ADVANCED_DRIVER_MODEL |
  4193. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  4194. /* Setup L2 cache */
  4195. WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
  4196. ENABLE_L2_FRAGMENT_PROCESSING |
  4197. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  4198. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  4199. EFFECTIVE_L2_QUEUE_SIZE(7) |
  4200. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  4201. WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
  4202. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  4203. L2_CACHE_BIGK_FRAGMENT_SIZE(6));
  4204. /* setup context0 */
  4205. WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
  4206. WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
  4207. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
  4208. WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
  4209. (u32)(rdev->dummy_page.addr >> 12));
  4210. WREG32(VM_CONTEXT0_CNTL2, 0);
  4211. WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
  4212. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
  4213. WREG32(0x15D4, 0);
  4214. WREG32(0x15D8, 0);
  4215. WREG32(0x15DC, 0);
  4216. /* empty context1-15 */
4217. /* FIXME: start with 4 GB; once 2-level page tables are in use,
4218. * switch to the full VM size space
4219. */
  4220. /* set vm size, must be a multiple of 4 */
  4221. WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
  4222. WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
  4223. for (i = 1; i < 16; i++) {
  4224. if (i < 8)
  4225. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
  4226. rdev->gart.table_addr >> 12);
  4227. else
  4228. WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
  4229. rdev->gart.table_addr >> 12);
  4230. }
  4231. /* enable context1-15 */
  4232. WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
  4233. (u32)(rdev->dummy_page.addr >> 12));
  4234. WREG32(VM_CONTEXT1_CNTL2, 4);
  4235. WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
  4236. RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4237. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  4238. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4239. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  4240. PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4241. PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
  4242. VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4243. VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
  4244. READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4245. READ_PROTECTION_FAULT_ENABLE_DEFAULT |
  4246. WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4247. WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
  4248. /* TC cache setup ??? */
  4249. WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
  4250. WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
  4251. WREG32(TC_CFG_L1_STORE_POLICY, 0);
  4252. WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
  4253. WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
  4254. WREG32(TC_CFG_L2_STORE_POLICY0, 0);
  4255. WREG32(TC_CFG_L2_STORE_POLICY1, 0);
  4256. WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
  4257. WREG32(TC_CFG_L1_VOLATILE, 0);
  4258. WREG32(TC_CFG_L2_VOLATILE, 0);
  4259. if (rdev->family == CHIP_KAVERI) {
  4260. u32 tmp = RREG32(CHUB_CONTROL);
  4261. tmp &= ~BYPASS_VM;
  4262. WREG32(CHUB_CONTROL, tmp);
  4263. }
  4264. /* XXX SH_MEM regs */
  4265. /* where to put LDS, scratch, GPUVM in FSA64 space */
  4266. mutex_lock(&rdev->srbm_mutex);
  4267. for (i = 0; i < 16; i++) {
  4268. cik_srbm_select(rdev, 0, 0, 0, i);
  4269. /* CP and shaders */
  4270. WREG32(SH_MEM_CONFIG, 0);
  4271. WREG32(SH_MEM_APE1_BASE, 1);
  4272. WREG32(SH_MEM_APE1_LIMIT, 0);
  4273. WREG32(SH_MEM_BASES, 0);
  4274. /* SDMA GFX */
  4275. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
  4276. WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
  4277. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
  4278. WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
  4279. /* XXX SDMA RLC - todo */
  4280. }
  4281. cik_srbm_select(rdev, 0, 0, 0, 0);
  4282. mutex_unlock(&rdev->srbm_mutex);
  4283. cik_pcie_gart_tlb_flush(rdev);
  4284. DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
  4285. (unsigned)(rdev->mc.gtt_size >> 20),
  4286. (unsigned long long)rdev->gart.table_addr);
  4287. rdev->gart.ready = true;
  4288. return 0;
  4289. }
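/*
 * Illustrative address math (values assumed): the page table registers
 * are programmed in 4 KB page units, so with gtt_start at 4 GB,
 *
 *   VM_CONTEXT0_PAGE_TABLE_START_ADDR = 0x100000000ULL >> 12 = 0x100000
 *
 * and VMID 0 then translates gtt_start..gtt_end through the GART table
 * pinned at gart.table_addr.
 */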
  4290. /**
  4291. * cik_pcie_gart_disable - gart disable
  4292. *
  4293. * @rdev: radeon_device pointer
  4294. *
4295. * This disables all VM page tables (CIK).
  4296. */
  4297. static void cik_pcie_gart_disable(struct radeon_device *rdev)
  4298. {
  4299. /* Disable all tables */
  4300. WREG32(VM_CONTEXT0_CNTL, 0);
  4301. WREG32(VM_CONTEXT1_CNTL, 0);
  4302. /* Setup TLB control */
  4303. WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  4304. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  4305. /* Setup L2 cache */
  4306. WREG32(VM_L2_CNTL,
  4307. ENABLE_L2_FRAGMENT_PROCESSING |
  4308. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  4309. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  4310. EFFECTIVE_L2_QUEUE_SIZE(7) |
  4311. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  4312. WREG32(VM_L2_CNTL2, 0);
  4313. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  4314. L2_CACHE_BIGK_FRAGMENT_SIZE(6));
  4315. radeon_gart_table_vram_unpin(rdev);
  4316. }
  4317. /**
  4318. * cik_pcie_gart_fini - vm fini callback
  4319. *
  4320. * @rdev: radeon_device pointer
  4321. *
  4322. * Tears down the driver GART/VM setup (CIK).
  4323. */
  4324. static void cik_pcie_gart_fini(struct radeon_device *rdev)
  4325. {
  4326. cik_pcie_gart_disable(rdev);
  4327. radeon_gart_table_vram_free(rdev);
  4328. radeon_gart_fini(rdev);
  4329. }
  4330. /* vm parser */
  4331. /**
  4332. * cik_ib_parse - vm ib_parse callback
  4333. *
  4334. * @rdev: radeon_device pointer
  4335. * @ib: indirect buffer pointer
  4336. *
  4337. * CIK uses hw IB checking so this is a nop (CIK).
  4338. */
  4339. int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
  4340. {
  4341. return 0;
  4342. }
  4343. /*
  4344. * vm
  4345. * VMID 0 is the physical GPU addresses as used by the kernel.
  4346. * VMIDs 1-15 are used for userspace clients and are handled
  4347. * by the radeon vm/hsa code.
  4348. */
  4349. /**
  4350. * cik_vm_init - cik vm init callback
  4351. *
  4352. * @rdev: radeon_device pointer
  4353. *
  4354. * Inits cik specific vm parameters (number of VMs, base of vram for
  4355. * VMIDs 1-15) (CIK).
  4356. * Returns 0 for success.
  4357. */
  4358. int cik_vm_init(struct radeon_device *rdev)
  4359. {
  4360. /* number of VMs */
  4361. rdev->vm_manager.nvm = 16;
  4362. /* base offset of vram pages */
  4363. if (rdev->flags & RADEON_IS_IGP) {
  4364. u64 tmp = RREG32(MC_VM_FB_OFFSET);
  4365. tmp <<= 22;
  4366. rdev->vm_manager.vram_base_offset = tmp;
4367. } else {
4368. rdev->vm_manager.vram_base_offset = 0;
}
  4369. return 0;
  4370. }
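/*
 * Illustrative (value assumed): on an IGP with MC_VM_FB_OFFSET = 0x4
 * the stolen-memory carveout starts at 0x4 << 22 = 16 MB, and
 * vram_base_offset is added when mapping VRAM pages for VMIDs 1-15.
 */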
  4371. /**
  4372. * cik_vm_fini - cik vm fini callback
  4373. *
  4374. * @rdev: radeon_device pointer
  4375. *
  4376. * Tear down any asic specific VM setup (CIK).
  4377. */
  4378. void cik_vm_fini(struct radeon_device *rdev)
  4379. {
  4380. }
  4381. /**
  4382. * cik_vm_decode_fault - print human readable fault info
  4383. *
  4384. * @rdev: radeon_device pointer
  4385. * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4386. * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  4387. *
  4388. * Print human readable fault information (CIK).
  4389. */
  4390. static void cik_vm_decode_fault(struct radeon_device *rdev,
  4391. u32 status, u32 addr, u32 mc_client)
  4392. {
  4393. u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
  4394. u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
  4395. u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
  4396. char *block = (char *)&mc_client;
  4397. printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
  4398. protections, vmid, addr,
  4399. (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
  4400. block, mc_id);
  4401. }
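/*
 * Decoding note: "block" aliases the bytes of mc_client, so the raw
 * register value prints as the short ASCII client tag the hardware
 * packs into it rather than as a number.
 */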
  4402. /**
  4403. * cik_vm_flush - cik vm flush using the CP
  4404. *
4405. * @rdev: radeon_device pointer
* @ridx: index of the ring doing the flush
* @vm: VM to flush, or NULL
  4406. *
  4407. * Update the page table base and flush the VM TLB
  4408. * using the CP (CIK).
  4409. */
  4410. void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
  4411. {
  4412. struct radeon_ring *ring = &rdev->ring[ridx];
  4413. if (vm == NULL)
  4414. return;
  4415. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4416. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4417. WRITE_DATA_DST_SEL(0)));
  4418. if (vm->id < 8) {
  4419. radeon_ring_write(ring,
  4420. (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
  4421. } else {
  4422. radeon_ring_write(ring,
  4423. (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
  4424. }
  4425. radeon_ring_write(ring, 0);
  4426. radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
  4427. /* update SH_MEM_* regs */
  4428. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4429. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4430. WRITE_DATA_DST_SEL(0)));
  4431. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  4432. radeon_ring_write(ring, 0);
  4433. radeon_ring_write(ring, VMID(vm->id));
  4434. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
  4435. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4436. WRITE_DATA_DST_SEL(0)));
  4437. radeon_ring_write(ring, SH_MEM_BASES >> 2);
  4438. radeon_ring_write(ring, 0);
  4439. radeon_ring_write(ring, 0); /* SH_MEM_BASES */
  4440. radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
  4441. radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
  4442. radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
  4443. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4444. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4445. WRITE_DATA_DST_SEL(0)));
  4446. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  4447. radeon_ring_write(ring, 0);
  4448. radeon_ring_write(ring, VMID(0));
  4449. /* HDP flush */
  4450. /* We should be using the WAIT_REG_MEM packet here like in
  4451. * cik_fence_ring_emit(), but it causes the CP to hang in this
  4452. * context...
  4453. */
  4454. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4455. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4456. WRITE_DATA_DST_SEL(0)));
  4457. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  4458. radeon_ring_write(ring, 0);
  4459. radeon_ring_write(ring, 0);
  4460. /* bits 0-15 are the VM contexts0-15 */
  4461. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4462. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4463. WRITE_DATA_DST_SEL(0)));
  4464. radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
  4465. radeon_ring_write(ring, 0);
  4466. radeon_ring_write(ring, 1 << vm->id);
  4467. /* compute doesn't have PFP */
  4468. if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
  4469. /* sync PFP to ME, otherwise we might get invalid PFP reads */
  4470. radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  4471. radeon_ring_write(ring, 0x0);
  4472. }
  4473. }
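/*
 * Packet sizing note (general PM4 rule): PACKET3(op, n) is followed by
 * n + 1 dwords, so WRITE_DATA with n = 3 above carries control, a
 * 64-bit destination address and one data dword, while n = 6 carries
 * four data dwords for the SH_MEM_* block.
 */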
  4474. /**
4475. * cik_vm_set_page - update the page tables using CP or sDMA
  4476. *
  4477. * @rdev: radeon_device pointer
  4478. * @ib: indirect buffer to fill with commands
  4479. * @pe: addr of the page entry
  4480. * @addr: dst addr to write into pe
  4481. * @count: number of page entries to update
  4482. * @incr: increase next addr by incr bytes
  4483. * @flags: access flags
  4484. *
  4485. * Update the page tables using CP or sDMA (CIK).
  4486. */
  4487. void cik_vm_set_page(struct radeon_device *rdev,
  4488. struct radeon_ib *ib,
  4489. uint64_t pe,
  4490. uint64_t addr, unsigned count,
  4491. uint32_t incr, uint32_t flags)
  4492. {
  4493. uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
  4494. uint64_t value;
  4495. unsigned ndw;
  4496. if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
  4497. /* CP */
  4498. while (count) {
  4499. ndw = 2 + count * 2;
  4500. if (ndw > 0x3FFE)
  4501. ndw = 0x3FFE;
  4502. ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
  4503. ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
  4504. WRITE_DATA_DST_SEL(1));
  4505. ib->ptr[ib->length_dw++] = pe;
  4506. ib->ptr[ib->length_dw++] = upper_32_bits(pe);
  4507. for (; ndw > 2; ndw -= 2, --count, pe += 8) {
  4508. if (flags & RADEON_VM_PAGE_SYSTEM) {
  4509. value = radeon_vm_map_gart(rdev, addr);
  4510. value &= 0xFFFFFFFFFFFFF000ULL;
  4511. } else if (flags & RADEON_VM_PAGE_VALID) {
  4512. value = addr;
  4513. } else {
  4514. value = 0;
  4515. }
  4516. addr += incr;
  4517. value |= r600_flags;
  4518. ib->ptr[ib->length_dw++] = value;
  4519. ib->ptr[ib->length_dw++] = upper_32_bits(value);
  4520. }
  4521. }
  4522. } else {
  4523. /* DMA */
  4524. cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
  4525. }
  4526. }
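/*
 * Illustrative packing math: a WRITE_DATA packet is capped at 0x3FFE
 * dwords here, and each PTE costs two dwords after the two-dword
 * destination address, so at most (0x3FFE - 2) / 2 = 8190 page entries
 * go out per packet before the loop emits a new header.
 */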
  4527. /*
  4528. * RLC
  4529. * The RLC is a multi-purpose microengine that handles a
  4530. * variety of functions, the most important of which is
  4531. * the interrupt controller.
  4532. */
  4533. static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
  4534. bool enable)
  4535. {
  4536. u32 tmp = RREG32(CP_INT_CNTL_RING0);
  4537. if (enable)
  4538. tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  4539. else
  4540. tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  4541. WREG32(CP_INT_CNTL_RING0, tmp);
  4542. }
  4543. static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
  4544. {
  4545. u32 tmp;
  4546. tmp = RREG32(RLC_LB_CNTL);
  4547. if (enable)
  4548. tmp |= LOAD_BALANCE_ENABLE;
  4549. else
  4550. tmp &= ~LOAD_BALANCE_ENABLE;
  4551. WREG32(RLC_LB_CNTL, tmp);
  4552. }
  4553. static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
  4554. {
  4555. u32 i, j, k;
  4556. u32 mask;
  4557. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  4558. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  4559. cik_select_se_sh(rdev, i, j);
  4560. for (k = 0; k < rdev->usec_timeout; k++) {
  4561. if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
  4562. break;
  4563. udelay(1);
  4564. }
  4565. }
  4566. }
  4567. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  4568. mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
  4569. for (k = 0; k < rdev->usec_timeout; k++) {
  4570. if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
  4571. break;
  4572. udelay(1);
  4573. }
  4574. }
  4575. static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
  4576. {
  4577. u32 tmp;
  4578. tmp = RREG32(RLC_CNTL);
  4579. if (tmp != rlc)
  4580. WREG32(RLC_CNTL, rlc);
  4581. }
  4582. static u32 cik_halt_rlc(struct radeon_device *rdev)
  4583. {
  4584. u32 data, orig;
  4585. orig = data = RREG32(RLC_CNTL);
  4586. if (data & RLC_ENABLE) {
  4587. u32 i;
  4588. data &= ~RLC_ENABLE;
  4589. WREG32(RLC_CNTL, data);
  4590. for (i = 0; i < rdev->usec_timeout; i++) {
  4591. if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
  4592. break;
  4593. udelay(1);
  4594. }
  4595. cik_wait_for_rlc_serdes(rdev);
  4596. }
  4597. return orig;
  4598. }
  4599. void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
  4600. {
  4601. u32 tmp, i, mask;
  4602. tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
  4603. WREG32(RLC_GPR_REG2, tmp);
  4604. mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
  4605. for (i = 0; i < rdev->usec_timeout; i++) {
  4606. if ((RREG32(RLC_GPM_STAT) & mask) == mask)
  4607. break;
  4608. udelay(1);
  4609. }
  4610. for (i = 0; i < rdev->usec_timeout; i++) {
  4611. if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
  4612. break;
  4613. udelay(1);
  4614. }
  4615. }
  4616. void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
  4617. {
  4618. u32 tmp;
  4619. tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
  4620. WREG32(RLC_GPR_REG2, tmp);
  4621. }
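/*
 * Usage sketch (assumed caller): gating changes bracket their register
 * writes with this handshake, e.g.
 *
 *   cik_enter_rlc_safe_mode(rdev);  /* RLC acks REQ once GFX is safe */
 *   ...reprogram CG/PG state...
 *   cik_exit_rlc_safe_mode(rdev);
 */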
  4622. /**
  4623. * cik_rlc_stop - stop the RLC ME
  4624. *
  4625. * @rdev: radeon_device pointer
  4626. *
  4627. * Halt the RLC ME (MicroEngine) (CIK).
  4628. */
  4629. static void cik_rlc_stop(struct radeon_device *rdev)
  4630. {
  4631. WREG32(RLC_CNTL, 0);
  4632. cik_enable_gui_idle_interrupt(rdev, false);
  4633. cik_wait_for_rlc_serdes(rdev);
  4634. }
  4635. /**
  4636. * cik_rlc_start - start the RLC ME
  4637. *
  4638. * @rdev: radeon_device pointer
  4639. *
  4640. * Unhalt the RLC ME (MicroEngine) (CIK).
  4641. */
  4642. static void cik_rlc_start(struct radeon_device *rdev)
  4643. {
  4644. WREG32(RLC_CNTL, RLC_ENABLE);
  4645. cik_enable_gui_idle_interrupt(rdev, true);
  4646. udelay(50);
  4647. }
  4648. /**
  4649. * cik_rlc_resume - setup the RLC hw
  4650. *
  4651. * @rdev: radeon_device pointer
  4652. *
  4653. * Initialize the RLC registers, load the ucode,
  4654. * and start the RLC (CIK).
  4655. * Returns 0 for success, -EINVAL if the ucode is not available.
  4656. */
  4657. static int cik_rlc_resume(struct radeon_device *rdev)
  4658. {
  4659. u32 i, size, tmp;
  4660. const __be32 *fw_data;
  4661. if (!rdev->rlc_fw)
  4662. return -EINVAL;
  4663. switch (rdev->family) {
  4664. case CHIP_BONAIRE:
  4665. default:
  4666. size = BONAIRE_RLC_UCODE_SIZE;
  4667. break;
  4668. case CHIP_KAVERI:
  4669. size = KV_RLC_UCODE_SIZE;
  4670. break;
  4671. case CHIP_KABINI:
  4672. size = KB_RLC_UCODE_SIZE;
  4673. break;
  4674. }
  4675. cik_rlc_stop(rdev);
  4676. /* disable CG */
  4677. tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
  4678. WREG32(RLC_CGCG_CGLS_CTRL, tmp);
  4679. si_rlc_reset(rdev);
  4680. cik_init_pg(rdev);
  4681. cik_init_cg(rdev);
  4682. WREG32(RLC_LB_CNTR_INIT, 0);
  4683. WREG32(RLC_LB_CNTR_MAX, 0x00008000);
  4684. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  4685. WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
  4686. WREG32(RLC_LB_PARAMS, 0x00600408);
  4687. WREG32(RLC_LB_CNTL, 0x80000004);
  4688. WREG32(RLC_MC_CNTL, 0);
  4689. WREG32(RLC_UCODE_CNTL, 0);
  4690. fw_data = (const __be32 *)rdev->rlc_fw->data;
  4691. WREG32(RLC_GPM_UCODE_ADDR, 0);
  4692. for (i = 0; i < size; i++)
  4693. WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
  4694. WREG32(RLC_GPM_UCODE_ADDR, 0);
  4695. /* XXX - find out what chips support lbpw */
  4696. cik_enable_lbpw(rdev, false);
  4697. if (rdev->family == CHIP_BONAIRE)
  4698. WREG32(RLC_DRIVER_DMA_STATUS, 0);
  4699. cik_rlc_start(rdev);
  4700. return 0;
  4701. }
  4702. static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
  4703. {
  4704. u32 data, orig, tmp, tmp2;
  4705. orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
  4706. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
  4707. cik_enable_gui_idle_interrupt(rdev, true);
  4708. tmp = cik_halt_rlc(rdev);
  4709. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  4710. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  4711. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  4712. tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
  4713. WREG32(RLC_SERDES_WR_CTRL, tmp2);
  4714. cik_update_rlc(rdev, tmp);
  4715. data |= CGCG_EN | CGLS_EN;
  4716. } else {
  4717. cik_enable_gui_idle_interrupt(rdev, false);
  4718. RREG32(CB_CGTT_SCLK_CTRL);
  4719. RREG32(CB_CGTT_SCLK_CTRL);
  4720. RREG32(CB_CGTT_SCLK_CTRL);
  4721. RREG32(CB_CGTT_SCLK_CTRL);
  4722. data &= ~(CGCG_EN | CGLS_EN);
  4723. }
  4724. if (orig != data)
  4725. WREG32(RLC_CGCG_CGLS_CTRL, data);
  4726. }
  4727. static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
  4728. {
  4729. u32 data, orig, tmp = 0;
  4730. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
  4731. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
  4732. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
  4733. orig = data = RREG32(CP_MEM_SLP_CNTL);
  4734. data |= CP_MEM_LS_EN;
  4735. if (orig != data)
  4736. WREG32(CP_MEM_SLP_CNTL, data);
  4737. }
  4738. }
  4739. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  4740. data &= 0xfffffffd;
  4741. if (orig != data)
  4742. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  4743. tmp = cik_halt_rlc(rdev);
  4744. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  4745. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  4746. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  4747. data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
  4748. WREG32(RLC_SERDES_WR_CTRL, data);
  4749. cik_update_rlc(rdev, tmp);
  4750. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
  4751. orig = data = RREG32(CGTS_SM_CTRL_REG);
  4752. data &= ~SM_MODE_MASK;
  4753. data |= SM_MODE(0x2);
  4754. data |= SM_MODE_ENABLE;
  4755. data &= ~CGTS_OVERRIDE;
  4756. if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
  4757. (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
  4758. data &= ~CGTS_LS_OVERRIDE;
  4759. data &= ~ON_MONITOR_ADD_MASK;
  4760. data |= ON_MONITOR_ADD_EN;
  4761. data |= ON_MONITOR_ADD(0x96);
  4762. if (orig != data)
  4763. WREG32(CGTS_SM_CTRL_REG, data);
  4764. }
  4765. } else {
  4766. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  4767. data |= 0x00000002;
  4768. if (orig != data)
  4769. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  4770. data = RREG32(RLC_MEM_SLP_CNTL);
  4771. if (data & RLC_MEM_LS_EN) {
  4772. data &= ~RLC_MEM_LS_EN;
  4773. WREG32(RLC_MEM_SLP_CNTL, data);
  4774. }
  4775. data = RREG32(CP_MEM_SLP_CNTL);
  4776. if (data & CP_MEM_LS_EN) {
  4777. data &= ~CP_MEM_LS_EN;
  4778. WREG32(CP_MEM_SLP_CNTL, data);
  4779. }
  4780. orig = data = RREG32(CGTS_SM_CTRL_REG);
  4781. data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
  4782. if (orig != data)
  4783. WREG32(CGTS_SM_CTRL_REG, data);
  4784. tmp = cik_halt_rlc(rdev);
  4785. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  4786. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  4787. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  4788. data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
  4789. WREG32(RLC_SERDES_WR_CTRL, data);
  4790. cik_update_rlc(rdev, tmp);
  4791. }
  4792. }
  4793. static const u32 mc_cg_registers[] =
  4794. {
  4795. MC_HUB_MISC_HUB_CG,
  4796. MC_HUB_MISC_SIP_CG,
  4797. MC_HUB_MISC_VM_CG,
  4798. MC_XPB_CLK_GAT,
  4799. ATC_MISC_CG,
  4800. MC_CITF_MISC_WR_CG,
  4801. MC_CITF_MISC_RD_CG,
  4802. MC_CITF_MISC_VM_CG,
  4803. VM_L2_CG,
  4804. };
  4805. static void cik_enable_mc_ls(struct radeon_device *rdev,
  4806. bool enable)
  4807. {
  4808. int i;
  4809. u32 orig, data;
  4810. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  4811. orig = data = RREG32(mc_cg_registers[i]);
  4812. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
  4813. data |= MC_LS_ENABLE;
  4814. else
  4815. data &= ~MC_LS_ENABLE;
  4816. if (data != orig)
  4817. WREG32(mc_cg_registers[i], data);
  4818. }
  4819. }
  4820. static void cik_enable_mc_mgcg(struct radeon_device *rdev,
  4821. bool enable)
  4822. {
  4823. int i;
  4824. u32 orig, data;
  4825. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  4826. orig = data = RREG32(mc_cg_registers[i]);
  4827. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
  4828. data |= MC_CG_ENABLE;
  4829. else
  4830. data &= ~MC_CG_ENABLE;
  4831. if (data != orig)
  4832. WREG32(mc_cg_registers[i], data);
  4833. }
  4834. }
  4835. static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
  4836. bool enable)
  4837. {
  4838. u32 orig, data;
  4839. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
  4840. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
  4841. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
  4842. } else {
  4843. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
  4844. data |= 0xff000000;
  4845. if (data != orig)
  4846. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
  4847. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
  4848. data |= 0xff000000;
  4849. if (data != orig)
  4850. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
  4851. }
  4852. }
  4853. static void cik_enable_sdma_mgls(struct radeon_device *rdev,
  4854. bool enable)
  4855. {
  4856. u32 orig, data;
  4857. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
  4858. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  4859. data |= 0x100;
  4860. if (orig != data)
  4861. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  4862. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  4863. data |= 0x100;
  4864. if (orig != data)
  4865. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  4866. } else {
  4867. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  4868. data &= ~0x100;
  4869. if (orig != data)
  4870. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  4871. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  4872. data &= ~0x100;
  4873. if (orig != data)
  4874. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  4875. }
  4876. }
  4877. static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
  4878. bool enable)
  4879. {
  4880. u32 orig, data;
  4881. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
  4882. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
  4883. data = 0xfff;
  4884. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  4885. orig = data = RREG32(UVD_CGC_CTRL);
  4886. data |= DCM;
  4887. if (orig != data)
  4888. WREG32(UVD_CGC_CTRL, data);
  4889. } else {
  4890. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
  4891. data &= ~0xfff;
  4892. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  4893. orig = data = RREG32(UVD_CGC_CTRL);
  4894. data &= ~DCM;
  4895. if (orig != data)
  4896. WREG32(UVD_CGC_CTRL, data);
  4897. }
  4898. }
  4899. static void cik_enable_bif_mgls(struct radeon_device *rdev,
  4900. bool enable)
  4901. {
  4902. u32 orig, data;
  4903. orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
  4904. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
  4905. data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
  4906. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
  4907. else
  4908. data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
  4909. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
  4910. if (orig != data)
  4911. WREG32_PCIE_PORT(PCIE_CNTL2, data);
  4912. }
  4913. static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
  4914. bool enable)
  4915. {
  4916. u32 orig, data;
  4917. orig = data = RREG32(HDP_HOST_PATH_CNTL);
  4918. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
  4919. data &= ~CLOCK_GATING_DIS;
  4920. else
  4921. data |= CLOCK_GATING_DIS;
  4922. if (orig != data)
  4923. WREG32(HDP_HOST_PATH_CNTL, data);
  4924. }
  4925. static void cik_enable_hdp_ls(struct radeon_device *rdev,
  4926. bool enable)
  4927. {
  4928. u32 orig, data;
  4929. orig = data = RREG32(HDP_MEM_POWER_LS);
  4930. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
  4931. data |= HDP_LS_ENABLE;
  4932. else
  4933. data &= ~HDP_LS_ENABLE;
  4934. if (orig != data)
  4935. WREG32(HDP_MEM_POWER_LS, data);
  4936. }
  4937. void cik_update_cg(struct radeon_device *rdev,
  4938. u32 block, bool enable)
  4939. {
  4940. if (block & RADEON_CG_BLOCK_GFX) {
  4941. cik_enable_gui_idle_interrupt(rdev, false);
  4942. /* order matters! */
  4943. if (enable) {
  4944. cik_enable_mgcg(rdev, true);
  4945. cik_enable_cgcg(rdev, true);
  4946. } else {
  4947. cik_enable_cgcg(rdev, false);
  4948. cik_enable_mgcg(rdev, false);
  4949. }
  4950. cik_enable_gui_idle_interrupt(rdev, true);
  4951. }
  4952. if (block & RADEON_CG_BLOCK_MC) {
  4953. if (!(rdev->flags & RADEON_IS_IGP)) {
  4954. cik_enable_mc_mgcg(rdev, enable);
  4955. cik_enable_mc_ls(rdev, enable);
  4956. }
  4957. }
  4958. if (block & RADEON_CG_BLOCK_SDMA) {
  4959. cik_enable_sdma_mgcg(rdev, enable);
  4960. cik_enable_sdma_mgls(rdev, enable);
  4961. }
  4962. if (block & RADEON_CG_BLOCK_BIF) {
  4963. cik_enable_bif_mgls(rdev, enable);
  4964. }
  4965. if (block & RADEON_CG_BLOCK_UVD) {
  4966. if (rdev->has_uvd)
  4967. cik_enable_uvd_mgcg(rdev, enable);
  4968. }
  4969. if (block & RADEON_CG_BLOCK_HDP) {
  4970. cik_enable_hdp_mgcg(rdev, enable);
  4971. cik_enable_hdp_ls(rdev, enable);
  4972. }
  4973. }
  4974. static void cik_init_cg(struct radeon_device *rdev)
  4975. {
  4976. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
  4977. if (rdev->has_uvd)
  4978. si_init_uvd_internal_cg(rdev);
  4979. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  4980. RADEON_CG_BLOCK_SDMA |
  4981. RADEON_CG_BLOCK_BIF |
  4982. RADEON_CG_BLOCK_UVD |
  4983. RADEON_CG_BLOCK_HDP), true);
  4984. }
  4985. static void cik_fini_cg(struct radeon_device *rdev)
  4986. {
  4987. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  4988. RADEON_CG_BLOCK_SDMA |
  4989. RADEON_CG_BLOCK_BIF |
  4990. RADEON_CG_BLOCK_UVD |
  4991. RADEON_CG_BLOCK_HDP), false);
  4992. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
  4993. }
  4994. static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
  4995. bool enable)
  4996. {
  4997. u32 data, orig;
  4998. orig = data = RREG32(RLC_PG_CNTL);
  4999. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5000. data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5001. else
  5002. data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5003. if (orig != data)
  5004. WREG32(RLC_PG_CNTL, data);
  5005. }
  5006. static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
  5007. bool enable)
  5008. {
  5009. u32 data, orig;
  5010. orig = data = RREG32(RLC_PG_CNTL);
  5011. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5012. data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5013. else
  5014. data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5015. if (orig != data)
  5016. WREG32(RLC_PG_CNTL, data);
  5017. }
  5018. static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
  5019. {
  5020. u32 data, orig;
  5021. orig = data = RREG32(RLC_PG_CNTL);
  5022. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
  5023. data &= ~DISABLE_CP_PG;
  5024. else
  5025. data |= DISABLE_CP_PG;
  5026. if (orig != data)
  5027. WREG32(RLC_PG_CNTL, data);
  5028. }
  5029. static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
  5030. {
  5031. u32 data, orig;
  5032. orig = data = RREG32(RLC_PG_CNTL);
  5033. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
  5034. data &= ~DISABLE_GDS_PG;
  5035. else
  5036. data |= DISABLE_GDS_PG;
  5037. if (orig != data)
  5038. WREG32(RLC_PG_CNTL, data);
  5039. }
  5040. #define CP_ME_TABLE_SIZE 96
  5041. #define CP_ME_TABLE_OFFSET 2048
  5042. #define CP_MEC_TABLE_OFFSET 4096
  5043. void cik_init_cp_pg_table(struct radeon_device *rdev)
  5044. {
  5045. const __be32 *fw_data;
  5046. volatile u32 *dst_ptr;
  5047. int me, i, max_me = 4;
  5048. u32 bo_offset = 0;
  5049. u32 table_offset;
  5050. if (rdev->family == CHIP_KAVERI)
  5051. max_me = 5;
  5052. if (rdev->rlc.cp_table_ptr == NULL)
  5053. return;
  5054. /* write the cp table buffer */
  5055. dst_ptr = rdev->rlc.cp_table_ptr;
  5056. for (me = 0; me < max_me; me++) {
  5057. if (me == 0) {
  5058. fw_data = (const __be32 *)rdev->ce_fw->data;
  5059. table_offset = CP_ME_TABLE_OFFSET;
  5060. } else if (me == 1) {
  5061. fw_data = (const __be32 *)rdev->pfp_fw->data;
  5062. table_offset = CP_ME_TABLE_OFFSET;
  5063. } else if (me == 2) {
  5064. fw_data = (const __be32 *)rdev->me_fw->data;
  5065. table_offset = CP_ME_TABLE_OFFSET;
  5066. } else {
  5067. fw_data = (const __be32 *)rdev->mec_fw->data;
  5068. table_offset = CP_MEC_TABLE_OFFSET;
  5069. }
5070. for (i = 0; i < CP_ME_TABLE_SIZE; i++) {
  5071. dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
  5072. }
  5073. bo_offset += CP_ME_TABLE_SIZE;
  5074. }
  5075. }
  5076. static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
  5077. bool enable)
  5078. {
  5079. u32 data, orig;
  5080. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
  5081. orig = data = RREG32(RLC_PG_CNTL);
  5082. data |= GFX_PG_ENABLE;
  5083. if (orig != data)
  5084. WREG32(RLC_PG_CNTL, data);
  5085. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5086. data |= AUTO_PG_EN;
  5087. if (orig != data)
  5088. WREG32(RLC_AUTO_PG_CTRL, data);
  5089. } else {
  5090. orig = data = RREG32(RLC_PG_CNTL);
  5091. data &= ~GFX_PG_ENABLE;
  5092. if (orig != data)
  5093. WREG32(RLC_PG_CNTL, data);
  5094. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5095. data &= ~AUTO_PG_EN;
  5096. if (orig != data)
  5097. WREG32(RLC_AUTO_PG_CTRL, data);
  5098. data = RREG32(DB_RENDER_CONTROL);
  5099. }
  5100. }
  5101. static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
  5102. {
  5103. u32 mask = 0, tmp, tmp1;
  5104. int i;
  5105. cik_select_se_sh(rdev, se, sh);
  5106. tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
  5107. tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
  5108. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5109. tmp &= 0xffff0000;
  5110. tmp |= tmp1;
  5111. tmp >>= 16;
5112. for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
  5113. mask <<= 1;
  5114. mask |= 1;
  5115. }
  5116. return (~tmp) & mask;
  5117. }
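/*
 * Worked example (values assumed): with max_cu_per_sh = 8 the mask
 * built above is 0xff; if the combined harvest field reads 0x0003
 * (CUs 0 and 1 fused off), the function returns (~0x0003) & 0xff =
 * 0xfc, i.e. CUs 2-7 active.
 */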
  5118. static void cik_init_ao_cu_mask(struct radeon_device *rdev)
  5119. {
  5120. u32 i, j, k, active_cu_number = 0;
  5121. u32 mask, counter, cu_bitmap;
  5122. u32 tmp = 0;
  5123. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  5124. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  5125. mask = 1;
  5126. cu_bitmap = 0;
  5127. counter = 0;
5128. for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
5129. if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
5130. if (counter < 2)
5131. cu_bitmap |= mask;
5132. counter++;
  5133. }
  5134. mask <<= 1;
  5135. }
  5136. active_cu_number += counter;
  5137. tmp |= (cu_bitmap << (i * 16 + j * 8));
  5138. }
  5139. }
  5140. WREG32(RLC_PG_AO_CU_MASK, tmp);
  5141. tmp = RREG32(RLC_MAX_PG_CU);
  5142. tmp &= ~MAX_PU_CU_MASK;
  5143. tmp |= MAX_PU_CU(active_cu_number);
  5144. WREG32(RLC_MAX_PG_CU, tmp);
  5145. }
  5146. static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
  5147. bool enable)
  5148. {
  5149. u32 data, orig;
  5150. orig = data = RREG32(RLC_PG_CNTL);
  5151. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
  5152. data |= STATIC_PER_CU_PG_ENABLE;
  5153. else
  5154. data &= ~STATIC_PER_CU_PG_ENABLE;
  5155. if (orig != data)
  5156. WREG32(RLC_PG_CNTL, data);
  5157. }
  5158. static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
  5159. bool enable)
  5160. {
  5161. u32 data, orig;
  5162. orig = data = RREG32(RLC_PG_CNTL);
  5163. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
  5164. data |= DYN_PER_CU_PG_ENABLE;
  5165. else
  5166. data &= ~DYN_PER_CU_PG_ENABLE;
  5167. if (orig != data)
  5168. WREG32(RLC_PG_CNTL, data);
  5169. }
  5170. #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
  5171. #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
  5172. static void cik_init_gfx_cgpg(struct radeon_device *rdev)
  5173. {
  5174. u32 data, orig;
  5175. u32 i;
  5176. if (rdev->rlc.cs_data) {
  5177. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  5178. WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
  5179. WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
  5180. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
  5181. } else {
  5182. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  5183. for (i = 0; i < 3; i++)
  5184. WREG32(RLC_GPM_SCRATCH_DATA, 0);
  5185. }
  5186. if (rdev->rlc.reg_list) {
  5187. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
  5188. for (i = 0; i < rdev->rlc.reg_list_size; i++)
  5189. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
  5190. }
  5191. orig = data = RREG32(RLC_PG_CNTL);
  5192. data |= GFX_PG_SRC;
  5193. if (orig != data)
  5194. WREG32(RLC_PG_CNTL, data);
  5195. WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
  5196. WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
  5197. data = RREG32(CP_RB_WPTR_POLL_CNTL);
  5198. data &= ~IDLE_POLL_COUNT_MASK;
  5199. data |= IDLE_POLL_COUNT(0x60);
  5200. WREG32(CP_RB_WPTR_POLL_CNTL, data);
  5201. data = 0x10101010;
  5202. WREG32(RLC_PG_DELAY, data);
  5203. data = RREG32(RLC_PG_DELAY_2);
  5204. data &= ~0xff;
  5205. data |= 0x3;
  5206. WREG32(RLC_PG_DELAY_2, data);
  5207. data = RREG32(RLC_AUTO_PG_CTRL);
  5208. data &= ~GRBM_REG_SGIT_MASK;
  5209. data |= GRBM_REG_SGIT(0x700);
  5210. WREG32(RLC_AUTO_PG_CTRL, data);
  5211. }
  5212. static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
  5213. {
  5214. cik_enable_gfx_cgpg(rdev, enable);
  5215. cik_enable_gfx_static_mgpg(rdev, enable);
  5216. cik_enable_gfx_dynamic_mgpg(rdev, enable);
  5217. }
  5218. u32 cik_get_csb_size(struct radeon_device *rdev)
  5219. {
  5220. u32 count = 0;
  5221. const struct cs_section_def *sect = NULL;
  5222. const struct cs_extent_def *ext = NULL;
  5223. if (rdev->rlc.cs_data == NULL)
  5224. return 0;
  5225. /* begin clear state */
  5226. count += 2;
  5227. /* context control state */
  5228. count += 3;
  5229. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  5230. for (ext = sect->section; ext->extent != NULL; ++ext) {
  5231. if (sect->id == SECT_CONTEXT)
  5232. count += 2 + ext->reg_count;
  5233. else
  5234. return 0;
  5235. }
  5236. }
  5237. /* pa_sc_raster_config/pa_sc_raster_config1 */
  5238. count += 4;
  5239. /* end clear state */
  5240. count += 2;
  5241. /* clear state */
  5242. count += 2;
  5243. return count;
  5244. }
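/*
 * Size accounting (matches what cik_get_csb_buffer() below emits):
 * 2 dwords to begin clear state + 3 for context control
 * + (2 + reg_count) per SECT_CONTEXT extent
 * + 4 for the pa_sc_raster_config pair
 * + 2 to end clear state + 2 for the final CLEAR_STATE packet.
 */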
  5245. void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
  5246. {
  5247. u32 count = 0, i;
  5248. const struct cs_section_def *sect = NULL;
  5249. const struct cs_extent_def *ext = NULL;
  5250. if (rdev->rlc.cs_data == NULL)
  5251. return;
  5252. if (buffer == NULL)
  5253. return;
  5254. buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
  5255. buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
  5256. buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
  5257. buffer[count++] = 0x80000000;
  5258. buffer[count++] = 0x80000000;
  5259. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  5260. for (ext = sect->section; ext->extent != NULL; ++ext) {
  5261. if (sect->id == SECT_CONTEXT) {
  5262. buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
  5263. buffer[count++] = ext->reg_index - 0xa000;
  5264. for (i = 0; i < ext->reg_count; i++)
  5265. buffer[count++] = ext->extent[i];
  5266. } else {
  5267. return;
  5268. }
  5269. }
  5270. }
  5271. buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
  5272. buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
  5273. switch (rdev->family) {
  5274. case CHIP_BONAIRE:
  5275. buffer[count++] = 0x16000012;
  5276. buffer[count++] = 0x00000000;
  5277. break;
  5278. case CHIP_KAVERI:
  5279. buffer[count++] = 0x00000000; /* XXX */
  5280. buffer[count++] = 0x00000000;
  5281. break;
  5282. case CHIP_KABINI:
  5283. buffer[count++] = 0x00000000; /* XXX */
  5284. buffer[count++] = 0x00000000;
  5285. break;
  5286. default:
  5287. buffer[count++] = 0x00000000;
  5288. buffer[count++] = 0x00000000;
  5289. break;
  5290. }
  5291. buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
  5292. buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
  5293. buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
  5294. buffer[count++] = 0;
  5295. }
  5296. static void cik_init_pg(struct radeon_device *rdev)
  5297. {
  5298. if (rdev->pg_flags) {
  5299. cik_enable_sck_slowdown_on_pu(rdev, true);
  5300. cik_enable_sck_slowdown_on_pd(rdev, true);
  5301. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
  5302. cik_init_gfx_cgpg(rdev);
  5303. cik_enable_cp_pg(rdev, true);
  5304. cik_enable_gds_pg(rdev, true);
  5305. }
  5306. cik_init_ao_cu_mask(rdev);
  5307. cik_update_gfx_pg(rdev, true);
  5308. }
  5309. }
  5310. static void cik_fini_pg(struct radeon_device *rdev)
  5311. {
  5312. if (rdev->pg_flags) {
  5313. cik_update_gfx_pg(rdev, false);
  5314. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
  5315. cik_enable_cp_pg(rdev, false);
  5316. cik_enable_gds_pg(rdev, false);
  5317. }
  5318. }
  5319. }
  5320. /*
  5321. * Interrupts
  5322. * Starting with r6xx, interrupts are handled via a ring buffer.
  5323. * Ring buffers are areas of GPU accessible memory that the GPU
  5324. * writes interrupt vectors into and the host reads vectors out of.
  5325. * There is a rptr (read pointer) that determines where the
  5326. * host is currently reading, and a wptr (write pointer)
  5327. * which determines where the GPU has written. When the
  5328. * pointers are equal, the ring is idle. When the GPU
  5329. * writes vectors to the ring buffer, it increments the
  5330. * wptr. When there is an interrupt, the host then starts
5331. fetching vectors and processing them until the pointers are
  5332. * equal again at which point it updates the rptr.
  5333. */
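/*
 * Illustrative walk (values assumed): with rptr == wptr == 0 the ring
 * is idle; the GPU posts two vectors and bumps wptr to 8, the host
 * drains entries 0..7 and writes rptr = 8, and the ring is idle again,
 * with both pointers wrapping modulo the ring size.
 */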
/**
 * cik_enable_interrupts - Enable the interrupt ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Enable the interrupt ring buffer (CIK).
 */
static void cik_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
/**
 * cik_disable_interrupts - Disable the interrupt ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Disable the interrupt ring buffer (CIK).
 */
static void cik_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}
/**
 * cik_disable_interrupt_state - Disable all interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Clear all interrupt enable bits used by the driver (CIK).
 */
static void cik_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* gfx ring */
	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	/* sdma */
	tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	/* compute queues */
	WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
	/* grbm */
	WREG32(GRBM_INT_CNTL, 0);
	/* vline/vblank, etc. */
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	/* dac hotplug */
	WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
	/* digital hotplug */
	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
/**
 * cik_irq_init - init and enable the interrupt ring
 *
 * @rdev: radeon_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (CIK).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int cik_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	cik_disable_interrupts(rdev);

	/* init rlc */
	ret = cik_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* XXX this should actually be a bus address, not an MC address. same on older asics */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	cik_disable_interrupt_state(rdev);

	pci_set_master(rdev->pdev);

	/* enable irqs */
	cik_enable_interrupts(rdev);

	return ret;
}
/**
 * cik_irq_set - enable/disable interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Enable interrupt sources on the GPU (vblanks, hpd,
 * etc.) (CIK).
 * Returns 0 for success, errors for failure.
 */
int cik_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl;
	u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
	u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 dma_cntl, dma_cntl1;
	u32 thermal_int;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		cik_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		cik_disable_interrupt_state(rdev);
		return 0;
	}

	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;

	cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
	cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;

	if (rdev->flags & RADEON_IS_IGP)
		thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
			~(THERM_INTH_MASK | THERM_INTL_MASK);
	else
		thermal_int = RREG32_SMC(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	/* enable CP interrupts on all rings */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("cik_irq_set: sw int gfx\n");
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
		DRM_DEBUG("cik_irq_set: sw int cp1\n");
		if (ring->me == 1) {
			switch (ring->pipe) {
			case 0:
				cp_m1p0 |= TIME_STAMP_INT_ENABLE;
				break;
			case 1:
				cp_m1p1 |= TIME_STAMP_INT_ENABLE;
				break;
			case 2:
				cp_m1p2 |= TIME_STAMP_INT_ENABLE;
				break;
			case 3:
				cp_m1p3 |= TIME_STAMP_INT_ENABLE;
				break;
			default:
				DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
				break;
			}
		} else if (ring->me == 2) {
			switch (ring->pipe) {
			case 0:
				cp_m2p0 |= TIME_STAMP_INT_ENABLE;
				break;
			case 1:
				cp_m2p1 |= TIME_STAMP_INT_ENABLE;
				break;
			case 2:
				cp_m2p2 |= TIME_STAMP_INT_ENABLE;
				break;
			case 3:
				cp_m2p3 |= TIME_STAMP_INT_ENABLE;
				break;
			default:
				DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
				break;
			}
		} else {
			DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
		}
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
		DRM_DEBUG("cik_irq_set: sw int cp2\n");
		if (ring->me == 1) {
			switch (ring->pipe) {
			case 0:
				cp_m1p0 |= TIME_STAMP_INT_ENABLE;
				break;
			case 1:
				cp_m1p1 |= TIME_STAMP_INT_ENABLE;
				break;
			case 2:
				cp_m1p2 |= TIME_STAMP_INT_ENABLE;
				break;
			case 3:
				cp_m1p3 |= TIME_STAMP_INT_ENABLE;
				break;
			default:
				DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
				break;
			}
		} else if (ring->me == 2) {
			switch (ring->pipe) {
			case 0:
				cp_m2p0 |= TIME_STAMP_INT_ENABLE;
				break;
			case 1:
				cp_m2p1 |= TIME_STAMP_INT_ENABLE;
				break;
			case 2:
				cp_m2p2 |= TIME_STAMP_INT_ENABLE;
				break;
			case 3:
				cp_m2p3 |= TIME_STAMP_INT_ENABLE;
				break;
			default:
				DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
				break;
			}
		} else {
			DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
		}
	}
	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("cik_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
		DRM_DEBUG("cik_irq_set: sw int dma1\n");
		dma_cntl1 |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("cik_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("cik_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("cik_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("cik_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("cik_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("cik_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INTERRUPT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("cik_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("cik_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("cik_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("cik_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("cik_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("cik_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		if (rdev->flags & RADEON_IS_IGP)
			thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
		else
			thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);

	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);

	WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
	WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
	WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
	WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
	WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
	WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
	WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
	WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	if (rdev->flags & RADEON_IS_IGP)
		WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
	else
		WREG32_SMC(CG_THERMAL_INT, thermal_int);

	return 0;
}
/**
 * cik_irq_ack - ack interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Ack interrupt sources on the GPU (vblanks, hpd,
 * etc.) (CIK). Certain interrupt sources are sw
 * generated and do not require an explicit ack.
 */
static inline void cik_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);

	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}
	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
/**
 * cik_irq_disable - disable interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw (CIK).
 */
static void cik_irq_disable(struct radeon_device *rdev)
{
	cik_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	cik_irq_ack(rdev);
	cik_disable_interrupt_state(rdev);
}
/**
 * cik_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC (CIK).
 * Used for suspend.
 */
static void cik_irq_suspend(struct radeon_device *rdev)
{
	cik_irq_disable(rdev);
	cik_rlc_stop(rdev);
}
/**
 * cik_irq_fini - tear down interrupt support
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw and free the IH ring
 * buffer (CIK).
 * Used for driver unload.
 */
static void cik_irq_fini(struct radeon_device *rdev)
{
	cik_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
/**
 * cik_get_ih_wptr - get the IH ring buffer wptr
 *
 * @rdev: radeon_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (CIK). Also check for
 * ring buffer overflow and deal with it.
 * Used by cik_irq_process().
 * Returns the value of the wptr.
 */
static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last vector that was not overwritten (wptr + 16).
		 * Hopefully this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/* CIK IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [71:64]  - RINGID
 *            CP:
 *            ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
 *            QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
 *                     - for gfx, hw shader state (0=PS...5=LS, 6=CS)
 *            ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
 *            PIPE_ID - ME0 0=3D
 *                    - ME1&2 compute dispatcher (4 pipes each)
 *            SDMA:
 *            INSTANCE_ID [1:0], QUEUE_ID[1:0]
 *            INSTANCE_ID - 0 = sdma0, 1 = sdma1
 *            QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
 * [79:72]  - VMID
 * [95:80]  - PASID
 * [127:96] - reserved
 */
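/*
 * Illustrative decode of a single IV entry per the layout above; a sketch
 * only (the names ih_ring, i and dw0/dw1/dw2 are hypothetical, the real
 * parsing lives in cik_irq_process() below):
 *
 *	u32 dw0 = le32_to_cpu(ih_ring[i + 0]);
 *	u32 dw1 = le32_to_cpu(ih_ring[i + 1]);
 *	u32 dw2 = le32_to_cpu(ih_ring[i + 2]);
 *	u32 src_id   = dw0 & 0xff;		// [7:0]
 *	u32 src_data = dw1 & 0xfffffff;		// [59:32]
 *	u32 ring_id  = dw2 & 0xff;		// [71:64] (RINGID)
 *	u32 vmid     = (dw2 >> 8) & 0xff;	// [79:72]
 *	u32 pasid    = (dw2 >> 16) & 0xffff;	// [95:80]
 */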
/**
 * cik_irq_process - interrupt handler
 *
 * @rdev: radeon_device pointer
 *
 * Interrupt handler (CIK). Walk the IH ring,
 * ack interrupts and schedule work to handle
 * interrupt events.
 * Returns irq process return code.
 */
int cik_irq_process(struct radeon_device *rdev)
{
	struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data, ring_id;
	u8 me_id, pipe_id, queue_id;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_reset = false;
	u32 addr, status, mc_client;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = cik_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	cik_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[2]))
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[3]))
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[4]))
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[5]))
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 146:
		case 147:
			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
			mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				addr);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				status);
			cik_vm_decode_fault(rdev, status, addr, mc_client);
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			break;
		case 176: /* GFX RB CP_INT */
		case 177: /* GFX IB CP_INT */
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			/* XXX check the bitfield order! */
			me_id = (ring_id & 0x60) >> 5;
			pipe_id = (ring_id & 0x18) >> 3;
			queue_id = (ring_id & 0x7) >> 0;
			switch (me_id) {
			case 0:
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
				break;
			case 1:
			case 2:
				if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
				if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
				break;
			}
			break;
		case 184: /* CP Privileged reg access */
			DRM_ERROR("Illegal register access in command stream\n");
			/* XXX check the bitfield order! */
			me_id = (ring_id & 0x60) >> 5;
			pipe_id = (ring_id & 0x18) >> 3;
			queue_id = (ring_id & 0x7) >> 0;
			switch (me_id) {
			case 0:
				/* This results in a full GPU reset, but all we need to do is soft
				 * reset the CP for gfx
				 */
				queue_reset = true;
				break;
			case 1:
				/* XXX compute */
				queue_reset = true;
				break;
			case 2:
				/* XXX compute */
				queue_reset = true;
				break;
			}
			break;
		case 185: /* CP Privileged inst */
			DRM_ERROR("Illegal instruction in command stream\n");
			/* XXX check the bitfield order! */
			me_id = (ring_id & 0x60) >> 5;
			pipe_id = (ring_id & 0x18) >> 3;
			queue_id = (ring_id & 0x7) >> 0;
			switch (me_id) {
			case 0:
				/* This results in a full GPU reset, but all we need to do is soft
				 * reset the CP for gfx
				 */
				queue_reset = true;
				break;
			case 1:
				/* XXX compute */
				queue_reset = true;
				break;
			case 2:
				/* XXX compute */
				queue_reset = true;
				break;
			}
			break;
		case 224: /* SDMA trap event */
			/* XXX check the bitfield order! */
			me_id = (ring_id & 0x3) >> 0;
			queue_id = (ring_id & 0xc) >> 2;
			DRM_DEBUG("IH: SDMA trap\n");
			switch (me_id) {
			case 0:
				switch (queue_id) {
				case 0:
					radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
					break;
				case 1:
					/* XXX compute */
					break;
				case 2:
					/* XXX compute */
					break;
				}
				break;
			case 1:
				switch (queue_id) {
				case 0:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
					break;
				case 1:
					/* XXX compute */
					break;
				case 2:
					/* XXX compute */
					break;
				}
				break;
			}
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 241: /* SDMA Privileged inst */
		case 247: /* SDMA Privileged inst */
			DRM_ERROR("Illegal instruction in SDMA command stream\n");
			/* XXX check the bitfield order! */
			me_id = (ring_id & 0x3) >> 0;
			queue_id = (ring_id & 0xc) >> 2;
			switch (me_id) {
			case 0:
				switch (queue_id) {
				case 0:
					queue_reset = true;
					break;
				case 1:
					/* XXX compute */
					queue_reset = true;
					break;
				case 2:
					/* XXX compute */
					queue_reset = true;
					break;
				}
				break;
			case 1:
				switch (queue_id) {
				case 0:
					queue_reset = true;
					break;
				case 1:
					/* XXX compute */
					queue_reset = true;
					break;
				case 2:
					/* XXX compute */
					queue_reset = true;
					break;
				}
				break;
			}
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_reset)
		schedule_work(&rdev->reset_work);
	if (queue_thermal)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = cik_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
/*
 * startup/shutdown callbacks
 */
/**
 * cik_startup - program the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Programs the asic to a functional state (CIK).
 * Called by cik_init() and cik_resume().
 * Returns 0 for success, error for failure.
 */
static int cik_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2/3 link */
	cik_pcie_gen3_enable(rdev);
	/* enable aspm */
	cik_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	cik_mc_program(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
			r = cik_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
		    !rdev->mc_fw) {
			r = cik_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ci_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}
	r = cik_pcie_gart_enable(rdev);
	if (r)
		return r;
	cik_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_KAVERI) {
			rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
			rdev->rlc.reg_list_size =
				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
		} else {
			rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
			rdev->rlc.reg_list_size =
				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
		}
	}
	rdev->rlc.cs_data = ci_cs_data;
	rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
	r = sumo_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* allocate mec buffers */
	r = cik_mec_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_uvd_resume(rdev);
	if (!r) {
		r = uvd_v4_2_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev,
							   R600_RING_TYPE_UVD_INDEX);
			if (r)
				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
		}
	}
	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = cik_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	cik_irq_set(rdev);
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	/* set up the compute queues */
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
			     PACKET3(PACKET3_NOP, 0x3FFF));
	if (r)
		return r;
	ring->me = 1; /* first MEC */
	ring->pipe = 0; /* first pipe */
	ring->queue = 0; /* first queue */
	ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
			     PACKET3(PACKET3_NOP, 0x3FFF));
	if (r)
		return r;
	/* dGPUs only have 1 MEC */
	ring->me = 1; /* first MEC */
	ring->pipe = 0; /* first pipe */
	ring->queue = 1; /* second queue */
	ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
			     SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
			     SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	if (r)
		return r;

	r = cik_cp_resume(rdev);
	if (r)
		return r;

	r = cik_sdma_resume(rdev);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     RADEON_CP_PACKET2);
		if (!r)
			r = uvd_v1_0_init(rdev);
		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = dce6_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_resume - resume the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Programs the asic to a functional state (CIK).
 * Called at resume.
 * Returns 0 for success, error for failure.
 */
int cik_resume(struct radeon_device *rdev)
{
	int r;

	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	cik_init_golden_registers(rdev);

	rdev->accel_working = true;
	r = cik_startup(rdev);
	if (r) {
		DRM_ERROR("cik startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
/**
 * cik_suspend - suspend the asic
 *
 * @rdev: radeon_device pointer
 *
 * Bring the chip into a state suitable for suspend (CIK).
 * Called at suspend.
 * Returns 0 for success.
 */
int cik_suspend(struct radeon_device *rdev)
{
	dce6_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cik_cp_enable(rdev, false);
	cik_sdma_enable(rdev, false);
	uvd_v1_0_fini(rdev);
	radeon_uvd_suspend(rdev);
	cik_fini_pg(rdev);
	cik_fini_cg(rdev);
	cik_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cik_pcie_gart_disable(rdev);
	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * does nothing more than calling asic specific function. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
/**
 * cik_init - asic specific driver and hw init
 *
 * @rdev: radeon_device pointer
 *
 * Setup asic specific driver variables and program the hw
 * to a functional state (CIK).
 * Called at driver startup.
 * Returns 0 for success, errors for failure.
 */
int cik_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	cik_init_golden_registers(rdev);
	/* Initialize scratch registers */
	cik_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = cik_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);
	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);
	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 256 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 256 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cik_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cik_cp_fini(rdev);
		cik_sdma_fini(rdev);
		cik_irq_fini(rdev);
		sumo_rlc_fini(rdev);
		cik_mec_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cik_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
/**
 * cik_fini - asic specific driver and hw fini
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the asic specific driver variables and program the hw
 * to an idle state (CIK).
 * Called at driver unload.
 */
void cik_fini(struct radeon_device *rdev)
{
	cik_cp_fini(rdev);
	cik_sdma_fini(rdev);
	cik_fini_pg(rdev);
	cik_fini_cg(rdev);
	cik_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	cik_mec_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	cik_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/* display watermark setup */
/**
 * dce8_line_buffer_adjust - Set up the line buffer
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
				   struct radeon_crtc *radeon_crtc,
				   struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
	       LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));

	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
			break;
		udelay(1);
	}

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @rdev: radeon_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
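/*
 * An illustrative (not driver) fill-in showing how these fields feed the
 * dce8_*_bandwidth() helpers below; the numbers are made up:
 *
 *	struct dce8_wm_params wm = {
 *		.dram_channels = cik_get_number_of_dram_channels(rdev),
 *		.yclk = 400000,		// 400 MHz per dram data pin
 *		.sclk = 600000,		// 600 MHz engine clock
 *		.disp_clk = 300000,	// 300 MHz display clock
 *	};
 *	u32 mbps = dce8_available_bandwidth(&wm);	// min over the three paths
 */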
/**
 * dce8_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
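/*
 * In plain terms the fixed-point math above computes
 *	bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7   [MB/s]
 * e.g. yclk = 400000 kHz on 2 channels: 400 * 8 * 0.7 = 2240 MB/s.
 */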

/**
 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
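
/*
 * Illustrative follow-up to the example above: with the same
 * yclk = 1000000 kHz and 2 channels, the worst-case 0.3 display
 * allocation yields 8 * 1000 * 0.3 = 2400 MBytes/s for the display
 * controllers.
 */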

/**
 * dce8_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce8_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
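
/*
 * Worked example (illustrative): dce8_data_return_bandwidth() and
 * dce8_dmif_request_bandwidth() share the same shape, 32 bytes per
 * clock at 0.8 efficiency. For sclk = 800000 kHz (800 MHz):
 *
 *	32 * 800 * 0.8 = 20480 MBytes/s
 *
 * and likewise with disp_clk in the DMIF case.
 */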

/**
 * dce8_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce8_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
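
/*
 * Illustrative composition of the examples above: with
 * dram_bandwidth = 5600 and both data_return_bandwidth and
 * dmif_req_bandwidth at 20480 MBytes/s, the raw DRAM path is the
 * bottleneck and dce8_available_bandwidth() returns 5600.
 */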

/**
 * dce8_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
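
/*
 * Worked example (illustrative): for a 1920-wide source at 4 bytes per
 * pixel, vsc = 1.0 and active_time + blank_time = 13200 ns (13.2 us
 * after the divide by 1000):
 *
 *	1920 * 4 * 1.0 / 13.2 ~= 581 MBytes/s
 *
 * i.e. bytes per line divided by the line time in microseconds gives
 * MBytes/s directly.
 */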

/**
 * dce8_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce8_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
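
/*
 * Worked example (illustrative): with available_bandwidth = 5600
 * MBytes/s, num_heads = 1 and disp_clk = 148500 kHz:
 *
 *	worst_chunk_return_time      = 512 * 8 * 1000 / 5600 = 731 ns
 *	cursor_line_pair_return_time = 128 * 4 * 1000 / 5600 = 91 ns
 *	dc_latency                   = 40000000 / 148500     = 269 ns
 *	latency = 2000 + (2 * 731 + 1 * 91) + 269 = 3822 ns
 *
 * plus any extra line fill time if the line buffer cannot be refilled
 * within the active portion of a scanline.
 */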

/**
 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce8_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce8_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce8_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
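
/*
 * Worked example (illustrative): continuing the numbers above with
 * line_time = 13200 ns, blank_time = 1680 ns and two latency tolerant
 * lines, the display can hide
 *
 *	2 * 13200 + 1680 = 28080 ns
 *
 * of latency, comfortably above the 3822 ns watermark from the
 * dce8_latency_watermark() example, so the check passes.
 */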

/**
 * dce8_program_watermarks - program display watermarks
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce8_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce8_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce8_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
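
/*
 * Worked example (illustrative): for 1080p at a 148500 kHz pixel clock
 * with crtc_htotal = 2200 and crtc_hdisplay = 1920:
 *
 *	pixel_period = 1000000 / 148500 = 6 ns (integer division)
 *	line_time    = 2200 * 6 = 13200 ns
 *	active_time  = 1920 * 6 = 11520 ns
 *	blank_time   = 13200 - 11520 = 1680 ns
 *
 * which are the timing numbers assumed in the earlier watermark
 * examples.
 */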

/**
 * dce8_bandwidth_update - program display watermarks
 *
 * @rdev: radeon_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
void dce8_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i++) {
		mode = &rdev->mode_info.crtcs[i]->base.mode;
		lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
		dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
	}
}

/**
 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
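
/*
 * Usage sketch (illustrative, not part of the driver): the counter is
 * free running, so elapsed GPU clocks over an interval can be measured
 * by differencing two snapshots:
 *
 *	uint64_t t0 = cik_get_gpu_clock_counter(rdev);
 *	... workload ...
 *	uint64_t t1 = cik_get_gpu_clock_counter(rdev);
 *	uint64_t elapsed = t1 - t0;
 *
 * The mutex only serializes the capture and the two-register read; it
 * does not stop the counter.
 */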

static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;

	r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		return r;

	r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	return r;
}
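
/*
 * Illustrative caller pattern (assumed; the exact clock values are
 * hypothetical):
 *
 *	r = cik_set_uvd_clocks(rdev, 53300, 40000);
 *	if (r)
 *		DRM_ERROR("UVD clock setup failed: %d\n", r);
 *
 * VCLK is programmed before DCLK, a failure on either leg is returned
 * to the caller, and each leg polls its status register for up to
 * ~1 second (100 * 10 ms) before giving up with -ETIMEDOUT.
 */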

static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
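	/*
	 * Note (added for clarity): bits 3:0 of Link Control 2 form the
	 * PCIe Target Link Speed field, where 1 = 2.5 GT/s (gen1),
	 * 2 = 5.0 GT/s (gen2) and 3 = 8.0 GT/s (gen3), which is what
	 * the literal values below encode.
	 */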
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}

static void cik_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	/* XXX double check IGPs */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_P_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			if (!disable_clkreq) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32_SMC(THM_CLK_CNTL, data);

				orig = data = RREG32_SMC(MISC_CLK_CTRL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32_SMC(MISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);

	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}