/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
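/*
 * Each MODULE_FIRMWARE() entry below names a microcode image this driver
 * may load at runtime via request_firmware(). Listing the files here
 * exposes the dependency to userspace tooling (modinfo, initramfs
 * generators) so the blobs can be shipped alongside the module.
 */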
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
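/*
 * Forward declarations for static helpers implemented further down in
 * this file: RLC stop, PCIe gen3 / ASPM programming, and clockgating /
 * powergating (cg/pg) setup and teardown.
 */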
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_fini_pg(struct radeon_device *rdev);
static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	/* readings with bit 9 set are clamped to 255 C */
	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	/* convert to millidegrees */
	actual_temp = actual_temp * 1000;

	return actual_temp;
}
/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

	/* treat a zero raw reading as 0 C; otherwise the SMC value is
	 * converted to degrees C as (raw / 8) - 49
	 */
	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	/* convert to millidegrees */
	actual_temp = actual_temp * 1000;

	return actual_temp;
}
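/*
 * Both helpers above report the die temperature in millidegrees Celsius,
 * the unit expected by the hwmon temperature sysfs interface. In the
 * radeon ASIC function tables they appear to back the dpm
 * ->get_temperature() callbacks for CI (Bonaire/Hawaii) and KV
 * (Kaveri/Kabini) parts respectively; see radeon_asic.c.
 */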
/*
 * Indirect register accessors
 */
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	/* select the register, read the index back to post the write,
	 * then fetch the value through the data port
	 */
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
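/*
 * RLC save/restore register lists consumed by the RLC microcode. The
 * layout described here is inferred from the table contents rather than
 * taken from hardware documentation: entries come in pairs of
 * ((broadcast/instance select) << 16) | (register byte offset >> 2)
 * followed by a value slot (0x00000000), and the bare counts such as
 * 0x3 and 0x5 appear to introduce indexed sub-lists. Treat this
 * interpretation as an assumption.
 */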
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
  814. 0x00000000,
  815. (0x0e00 << 16) | (0xac70 >> 2),
  816. 0x00000000,
  817. (0x0e00 << 16) | (0xac74 >> 2),
  818. 0x00000000,
  819. (0x0e00 << 16) | (0xac78 >> 2),
  820. 0x00000000,
  821. (0x0e00 << 16) | (0xac7c >> 2),
  822. 0x00000000,
  823. (0x0e00 << 16) | (0xac80 >> 2),
  824. 0x00000000,
  825. (0x0e00 << 16) | (0xac84 >> 2),
  826. 0x00000000,
  827. (0x0e00 << 16) | (0xac88 >> 2),
  828. 0x00000000,
  829. (0x0e00 << 16) | (0xac8c >> 2),
  830. 0x00000000,
  831. (0x0e00 << 16) | (0x970c >> 2),
  832. 0x00000000,
  833. (0x0e00 << 16) | (0x9714 >> 2),
  834. 0x00000000,
  835. (0x0e00 << 16) | (0x9718 >> 2),
  836. 0x00000000,
  837. (0x0e00 << 16) | (0x971c >> 2),
  838. 0x00000000,
  839. (0x0e00 << 16) | (0x31068 >> 2),
  840. 0x00000000,
  841. (0x4e00 << 16) | (0x31068 >> 2),
  842. 0x00000000,
  843. (0x5e00 << 16) | (0x31068 >> 2),
  844. 0x00000000,
  845. (0x6e00 << 16) | (0x31068 >> 2),
  846. 0x00000000,
  847. (0x7e00 << 16) | (0x31068 >> 2),
  848. 0x00000000,
  849. (0x0e00 << 16) | (0xcd10 >> 2),
  850. 0x00000000,
  851. (0x0e00 << 16) | (0xcd14 >> 2),
  852. 0x00000000,
  853. (0x0e00 << 16) | (0x88b0 >> 2),
  854. 0x00000000,
  855. (0x0e00 << 16) | (0x88b4 >> 2),
  856. 0x00000000,
  857. (0x0e00 << 16) | (0x88b8 >> 2),
  858. 0x00000000,
  859. (0x0e00 << 16) | (0x88bc >> 2),
  860. 0x00000000,
  861. (0x0400 << 16) | (0x89c0 >> 2),
  862. 0x00000000,
  863. (0x0e00 << 16) | (0x88c4 >> 2),
  864. 0x00000000,
  865. (0x0e00 << 16) | (0x88c8 >> 2),
  866. 0x00000000,
  867. (0x0e00 << 16) | (0x88d0 >> 2),
  868. 0x00000000,
  869. (0x0e00 << 16) | (0x88d4 >> 2),
  870. 0x00000000,
  871. (0x0e00 << 16) | (0x88d8 >> 2),
  872. 0x00000000,
  873. (0x0e00 << 16) | (0x8980 >> 2),
  874. 0x00000000,
  875. (0x0e00 << 16) | (0x30938 >> 2),
  876. 0x00000000,
  877. (0x0e00 << 16) | (0x3093c >> 2),
  878. 0x00000000,
  879. (0x0e00 << 16) | (0x30940 >> 2),
  880. 0x00000000,
  881. (0x0e00 << 16) | (0x89a0 >> 2),
  882. 0x00000000,
  883. (0x0e00 << 16) | (0x30900 >> 2),
  884. 0x00000000,
  885. (0x0e00 << 16) | (0x30904 >> 2),
  886. 0x00000000,
  887. (0x0e00 << 16) | (0x89b4 >> 2),
  888. 0x00000000,
  889. (0x0e00 << 16) | (0x3e1fc >> 2),
  890. 0x00000000,
  891. (0x0e00 << 16) | (0x3c210 >> 2),
  892. 0x00000000,
  893. (0x0e00 << 16) | (0x3c214 >> 2),
  894. 0x00000000,
  895. (0x0e00 << 16) | (0x3c218 >> 2),
  896. 0x00000000,
  897. (0x0e00 << 16) | (0x8904 >> 2),
  898. 0x00000000,
  899. 0x5,
  900. (0x0e00 << 16) | (0x8c28 >> 2),
  901. (0x0e00 << 16) | (0x8c2c >> 2),
  902. (0x0e00 << 16) | (0x8c30 >> 2),
  903. (0x0e00 << 16) | (0x8c34 >> 2),
  904. (0x0e00 << 16) | (0x9600 >> 2),
  905. };
  906. static const u32 bonaire_golden_spm_registers[] =
  907. {
  908. 0x30800, 0xe0ffffff, 0xe0000000
  909. };
  910. static const u32 bonaire_golden_common_registers[] =
  911. {
  912. 0xc770, 0xffffffff, 0x00000800,
  913. 0xc774, 0xffffffff, 0x00000800,
  914. 0xc798, 0xffffffff, 0x00007fbf,
  915. 0xc79c, 0xffffffff, 0x00007faf
  916. };
  917. static const u32 bonaire_golden_registers[] =
  918. {
  919. 0x3354, 0x00000333, 0x00000333,
  920. 0x3350, 0x000c0fc0, 0x00040200,
  921. 0x9a10, 0x00010000, 0x00058208,
  922. 0x3c000, 0xffff1fff, 0x00140000,
  923. 0x3c200, 0xfdfc0fff, 0x00000100,
  924. 0x3c234, 0x40000000, 0x40000200,
  925. 0x9830, 0xffffffff, 0x00000000,
  926. 0x9834, 0xf00fffff, 0x00000400,
  927. 0x9838, 0x0002021c, 0x00020200,
  928. 0xc78, 0x00000080, 0x00000000,
  929. 0x5bb0, 0x000000f0, 0x00000070,
  930. 0x5bc0, 0xf0311fff, 0x80300000,
  931. 0x98f8, 0x73773777, 0x12010001,
  932. 0x350c, 0x00810000, 0x408af000,
  933. 0x7030, 0x31000111, 0x00000011,
  934. 0x2f48, 0x73773777, 0x12010001,
  935. 0x220c, 0x00007fb6, 0x0021a1b1,
  936. 0x2210, 0x00007fb6, 0x002021b1,
  937. 0x2180, 0x00007fb6, 0x00002191,
  938. 0x2218, 0x00007fb6, 0x002121b1,
  939. 0x221c, 0x00007fb6, 0x002021b1,
  940. 0x21dc, 0x00007fb6, 0x00002191,
  941. 0x21e0, 0x00007fb6, 0x00002191,
  942. 0x3628, 0x0000003f, 0x0000000a,
  943. 0x362c, 0x0000003f, 0x0000000a,
  944. 0x2ae4, 0x00073ffe, 0x000022a2,
  945. 0x240c, 0x000007ff, 0x00000000,
  946. 0x8a14, 0xf000003f, 0x00000007,
  947. 0x8bf0, 0x00002001, 0x00000001,
  948. 0x8b24, 0xffffffff, 0x00ffffff,
  949. 0x30a04, 0x0000ff0f, 0x00000000,
  950. 0x28a4c, 0x07ffffff, 0x06000000,
  951. 0x4d8, 0x00000fff, 0x00000100,
  952. 0x3e78, 0x00000001, 0x00000002,
  953. 0x9100, 0x03000000, 0x0362c688,
  954. 0x8c00, 0x000000ff, 0x00000001,
  955. 0xe40, 0x00001fff, 0x00001fff,
  956. 0x9060, 0x0000007f, 0x00000020,
  957. 0x9508, 0x00010000, 0x00010000,
  958. 0xac14, 0x000003ff, 0x000000f3,
  959. 0xac0c, 0xffffffff, 0x00001032
  960. };
  961. static const u32 bonaire_mgcg_cgcg_init[] =
  962. {
  963. 0xc420, 0xffffffff, 0xfffffffc,
  964. 0x30800, 0xffffffff, 0xe0000000,
  965. 0x3c2a0, 0xffffffff, 0x00000100,
  966. 0x3c208, 0xffffffff, 0x00000100,
  967. 0x3c2c0, 0xffffffff, 0xc0000100,
  968. 0x3c2c8, 0xffffffff, 0xc0000100,
  969. 0x3c2c4, 0xffffffff, 0xc0000100,
  970. 0x55e4, 0xffffffff, 0x00600100,
  971. 0x3c280, 0xffffffff, 0x00000100,
  972. 0x3c214, 0xffffffff, 0x06000100,
  973. 0x3c220, 0xffffffff, 0x00000100,
  974. 0x3c218, 0xffffffff, 0x06000100,
  975. 0x3c204, 0xffffffff, 0x00000100,
  976. 0x3c2e0, 0xffffffff, 0x00000100,
  977. 0x3c224, 0xffffffff, 0x00000100,
  978. 0x3c200, 0xffffffff, 0x00000100,
  979. 0x3c230, 0xffffffff, 0x00000100,
  980. 0x3c234, 0xffffffff, 0x00000100,
  981. 0x3c250, 0xffffffff, 0x00000100,
  982. 0x3c254, 0xffffffff, 0x00000100,
  983. 0x3c258, 0xffffffff, 0x00000100,
  984. 0x3c25c, 0xffffffff, 0x00000100,
  985. 0x3c260, 0xffffffff, 0x00000100,
  986. 0x3c27c, 0xffffffff, 0x00000100,
  987. 0x3c278, 0xffffffff, 0x00000100,
  988. 0x3c210, 0xffffffff, 0x06000100,
  989. 0x3c290, 0xffffffff, 0x00000100,
  990. 0x3c274, 0xffffffff, 0x00000100,
  991. 0x3c2b4, 0xffffffff, 0x00000100,
  992. 0x3c2b0, 0xffffffff, 0x00000100,
  993. 0x3c270, 0xffffffff, 0x00000100,
  994. 0x30800, 0xffffffff, 0xe0000000,
  995. 0x3c020, 0xffffffff, 0x00010000,
  996. 0x3c024, 0xffffffff, 0x00030002,
  997. 0x3c028, 0xffffffff, 0x00040007,
  998. 0x3c02c, 0xffffffff, 0x00060005,
  999. 0x3c030, 0xffffffff, 0x00090008,
  1000. 0x3c034, 0xffffffff, 0x00010000,
  1001. 0x3c038, 0xffffffff, 0x00030002,
  1002. 0x3c03c, 0xffffffff, 0x00040007,
  1003. 0x3c040, 0xffffffff, 0x00060005,
  1004. 0x3c044, 0xffffffff, 0x00090008,
  1005. 0x3c048, 0xffffffff, 0x00010000,
  1006. 0x3c04c, 0xffffffff, 0x00030002,
  1007. 0x3c050, 0xffffffff, 0x00040007,
  1008. 0x3c054, 0xffffffff, 0x00060005,
  1009. 0x3c058, 0xffffffff, 0x00090008,
  1010. 0x3c05c, 0xffffffff, 0x00010000,
  1011. 0x3c060, 0xffffffff, 0x00030002,
  1012. 0x3c064, 0xffffffff, 0x00040007,
  1013. 0x3c068, 0xffffffff, 0x00060005,
  1014. 0x3c06c, 0xffffffff, 0x00090008,
  1015. 0x3c070, 0xffffffff, 0x00010000,
  1016. 0x3c074, 0xffffffff, 0x00030002,
  1017. 0x3c078, 0xffffffff, 0x00040007,
  1018. 0x3c07c, 0xffffffff, 0x00060005,
  1019. 0x3c080, 0xffffffff, 0x00090008,
  1020. 0x3c084, 0xffffffff, 0x00010000,
  1021. 0x3c088, 0xffffffff, 0x00030002,
  1022. 0x3c08c, 0xffffffff, 0x00040007,
  1023. 0x3c090, 0xffffffff, 0x00060005,
  1024. 0x3c094, 0xffffffff, 0x00090008,
  1025. 0x3c098, 0xffffffff, 0x00010000,
  1026. 0x3c09c, 0xffffffff, 0x00030002,
  1027. 0x3c0a0, 0xffffffff, 0x00040007,
  1028. 0x3c0a4, 0xffffffff, 0x00060005,
  1029. 0x3c0a8, 0xffffffff, 0x00090008,
  1030. 0x3c000, 0xffffffff, 0x96e00200,
  1031. 0x8708, 0xffffffff, 0x00900100,
  1032. 0xc424, 0xffffffff, 0x0020003f,
  1033. 0x38, 0xffffffff, 0x0140001c,
  1034. 0x3c, 0x000f0000, 0x000f0000,
  1035. 0x220, 0xffffffff, 0xC060000C,
  1036. 0x224, 0xc0000fff, 0x00000100,
  1037. 0xf90, 0xffffffff, 0x00000100,
  1038. 0xf98, 0x00000101, 0x00000000,
  1039. 0x20a8, 0xffffffff, 0x00000104,
  1040. 0x55e4, 0xff000fff, 0x00000100,
  1041. 0x30cc, 0xc0000fff, 0x00000104,
  1042. 0xc1e4, 0x00000001, 0x00000001,
  1043. 0xd00c, 0xff000ff0, 0x00000100,
  1044. 0xd80c, 0xff000ff0, 0x00000100
  1045. };
  1046. static const u32 spectre_golden_spm_registers[] =
  1047. {
  1048. 0x30800, 0xe0ffffff, 0xe0000000
  1049. };
  1050. static const u32 spectre_golden_common_registers[] =
  1051. {
  1052. 0xc770, 0xffffffff, 0x00000800,
  1053. 0xc774, 0xffffffff, 0x00000800,
  1054. 0xc798, 0xffffffff, 0x00007fbf,
  1055. 0xc79c, 0xffffffff, 0x00007faf
  1056. };
  1057. static const u32 spectre_golden_registers[] =
  1058. {
  1059. 0x3c000, 0xffff1fff, 0x96940200,
  1060. 0x3c00c, 0xffff0001, 0xff000000,
  1061. 0x3c200, 0xfffc0fff, 0x00000100,
  1062. 0x6ed8, 0x00010101, 0x00010000,
  1063. 0x9834, 0xf00fffff, 0x00000400,
  1064. 0x9838, 0xfffffffc, 0x00020200,
  1065. 0x5bb0, 0x000000f0, 0x00000070,
  1066. 0x5bc0, 0xf0311fff, 0x80300000,
  1067. 0x98f8, 0x73773777, 0x12010001,
  1068. 0x9b7c, 0x00ff0000, 0x00fc0000,
  1069. 0x2f48, 0x73773777, 0x12010001,
  1070. 0x8a14, 0xf000003f, 0x00000007,
  1071. 0x8b24, 0xffffffff, 0x00ffffff,
  1072. 0x28350, 0x3f3f3fff, 0x00000082,
  1073. 0x28355, 0x0000003f, 0x00000000,
  1074. 0x3e78, 0x00000001, 0x00000002,
  1075. 0x913c, 0xffff03df, 0x00000004,
  1076. 0xc768, 0x00000008, 0x00000008,
  1077. 0x8c00, 0x000008ff, 0x00000800,
  1078. 0x9508, 0x00010000, 0x00010000,
  1079. 0xac0c, 0xffffffff, 0x54763210,
  1080. 0x214f8, 0x01ff01ff, 0x00000002,
  1081. 0x21498, 0x007ff800, 0x00200000,
  1082. 0x2015c, 0xffffffff, 0x00000f40,
  1083. 0x30934, 0xffffffff, 0x00000001
  1084. };
  1085. static const u32 spectre_mgcg_cgcg_init[] =
  1086. {
  1087. 0xc420, 0xffffffff, 0xfffffffc,
  1088. 0x30800, 0xffffffff, 0xe0000000,
  1089. 0x3c2a0, 0xffffffff, 0x00000100,
  1090. 0x3c208, 0xffffffff, 0x00000100,
  1091. 0x3c2c0, 0xffffffff, 0x00000100,
  1092. 0x3c2c8, 0xffffffff, 0x00000100,
  1093. 0x3c2c4, 0xffffffff, 0x00000100,
  1094. 0x55e4, 0xffffffff, 0x00600100,
  1095. 0x3c280, 0xffffffff, 0x00000100,
  1096. 0x3c214, 0xffffffff, 0x06000100,
  1097. 0x3c220, 0xffffffff, 0x00000100,
  1098. 0x3c218, 0xffffffff, 0x06000100,
  1099. 0x3c204, 0xffffffff, 0x00000100,
  1100. 0x3c2e0, 0xffffffff, 0x00000100,
  1101. 0x3c224, 0xffffffff, 0x00000100,
  1102. 0x3c200, 0xffffffff, 0x00000100,
  1103. 0x3c230, 0xffffffff, 0x00000100,
  1104. 0x3c234, 0xffffffff, 0x00000100,
  1105. 0x3c250, 0xffffffff, 0x00000100,
  1106. 0x3c254, 0xffffffff, 0x00000100,
  1107. 0x3c258, 0xffffffff, 0x00000100,
  1108. 0x3c25c, 0xffffffff, 0x00000100,
  1109. 0x3c260, 0xffffffff, 0x00000100,
  1110. 0x3c27c, 0xffffffff, 0x00000100,
  1111. 0x3c278, 0xffffffff, 0x00000100,
  1112. 0x3c210, 0xffffffff, 0x06000100,
  1113. 0x3c290, 0xffffffff, 0x00000100,
  1114. 0x3c274, 0xffffffff, 0x00000100,
  1115. 0x3c2b4, 0xffffffff, 0x00000100,
  1116. 0x3c2b0, 0xffffffff, 0x00000100,
  1117. 0x3c270, 0xffffffff, 0x00000100,
  1118. 0x30800, 0xffffffff, 0xe0000000,
  1119. 0x3c020, 0xffffffff, 0x00010000,
  1120. 0x3c024, 0xffffffff, 0x00030002,
  1121. 0x3c028, 0xffffffff, 0x00040007,
  1122. 0x3c02c, 0xffffffff, 0x00060005,
  1123. 0x3c030, 0xffffffff, 0x00090008,
  1124. 0x3c034, 0xffffffff, 0x00010000,
  1125. 0x3c038, 0xffffffff, 0x00030002,
  1126. 0x3c03c, 0xffffffff, 0x00040007,
  1127. 0x3c040, 0xffffffff, 0x00060005,
  1128. 0x3c044, 0xffffffff, 0x00090008,
  1129. 0x3c048, 0xffffffff, 0x00010000,
  1130. 0x3c04c, 0xffffffff, 0x00030002,
  1131. 0x3c050, 0xffffffff, 0x00040007,
  1132. 0x3c054, 0xffffffff, 0x00060005,
  1133. 0x3c058, 0xffffffff, 0x00090008,
  1134. 0x3c05c, 0xffffffff, 0x00010000,
  1135. 0x3c060, 0xffffffff, 0x00030002,
  1136. 0x3c064, 0xffffffff, 0x00040007,
  1137. 0x3c068, 0xffffffff, 0x00060005,
  1138. 0x3c06c, 0xffffffff, 0x00090008,
  1139. 0x3c070, 0xffffffff, 0x00010000,
  1140. 0x3c074, 0xffffffff, 0x00030002,
  1141. 0x3c078, 0xffffffff, 0x00040007,
  1142. 0x3c07c, 0xffffffff, 0x00060005,
  1143. 0x3c080, 0xffffffff, 0x00090008,
  1144. 0x3c084, 0xffffffff, 0x00010000,
  1145. 0x3c088, 0xffffffff, 0x00030002,
  1146. 0x3c08c, 0xffffffff, 0x00040007,
  1147. 0x3c090, 0xffffffff, 0x00060005,
  1148. 0x3c094, 0xffffffff, 0x00090008,
  1149. 0x3c098, 0xffffffff, 0x00010000,
  1150. 0x3c09c, 0xffffffff, 0x00030002,
  1151. 0x3c0a0, 0xffffffff, 0x00040007,
  1152. 0x3c0a4, 0xffffffff, 0x00060005,
  1153. 0x3c0a8, 0xffffffff, 0x00090008,
  1154. 0x3c0ac, 0xffffffff, 0x00010000,
  1155. 0x3c0b0, 0xffffffff, 0x00030002,
  1156. 0x3c0b4, 0xffffffff, 0x00040007,
  1157. 0x3c0b8, 0xffffffff, 0x00060005,
  1158. 0x3c0bc, 0xffffffff, 0x00090008,
  1159. 0x3c000, 0xffffffff, 0x96e00200,
  1160. 0x8708, 0xffffffff, 0x00900100,
  1161. 0xc424, 0xffffffff, 0x0020003f,
  1162. 0x38, 0xffffffff, 0x0140001c,
  1163. 0x3c, 0x000f0000, 0x000f0000,
  1164. 0x220, 0xffffffff, 0xC060000C,
  1165. 0x224, 0xc0000fff, 0x00000100,
  1166. 0xf90, 0xffffffff, 0x00000100,
  1167. 0xf98, 0x00000101, 0x00000000,
  1168. 0x20a8, 0xffffffff, 0x00000104,
  1169. 0x55e4, 0xff000fff, 0x00000100,
  1170. 0x30cc, 0xc0000fff, 0x00000104,
  1171. 0xc1e4, 0x00000001, 0x00000001,
  1172. 0xd00c, 0xff000ff0, 0x00000100,
  1173. 0xd80c, 0xff000ff0, 0x00000100
  1174. };
  1175. static const u32 kalindi_golden_spm_registers[] =
  1176. {
  1177. 0x30800, 0xe0ffffff, 0xe0000000
  1178. };
  1179. static const u32 kalindi_golden_common_registers[] =
  1180. {
  1181. 0xc770, 0xffffffff, 0x00000800,
  1182. 0xc774, 0xffffffff, 0x00000800,
  1183. 0xc798, 0xffffffff, 0x00007fbf,
  1184. 0xc79c, 0xffffffff, 0x00007faf
  1185. };
  1186. static const u32 kalindi_golden_registers[] =
  1187. {
  1188. 0x3c000, 0xffffdfff, 0x6e944040,
  1189. 0x55e4, 0xff607fff, 0xfc000100,
  1190. 0x3c220, 0xff000fff, 0x00000100,
  1191. 0x3c224, 0xff000fff, 0x00000100,
  1192. 0x3c200, 0xfffc0fff, 0x00000100,
  1193. 0x6ed8, 0x00010101, 0x00010000,
  1194. 0x9830, 0xffffffff, 0x00000000,
  1195. 0x9834, 0xf00fffff, 0x00000400,
  1196. 0x5bb0, 0x000000f0, 0x00000070,
  1197. 0x5bc0, 0xf0311fff, 0x80300000,
  1198. 0x98f8, 0x73773777, 0x12010001,
  1199. 0x98fc, 0xffffffff, 0x00000010,
  1200. 0x9b7c, 0x00ff0000, 0x00fc0000,
  1201. 0x8030, 0x00001f0f, 0x0000100a,
  1202. 0x2f48, 0x73773777, 0x12010001,
  1203. 0x2408, 0x000fffff, 0x000c007f,
  1204. 0x8a14, 0xf000003f, 0x00000007,
  1205. 0x8b24, 0x3fff3fff, 0x00ffcfff,
  1206. 0x30a04, 0x0000ff0f, 0x00000000,
  1207. 0x28a4c, 0x07ffffff, 0x06000000,
  1208. 0x4d8, 0x00000fff, 0x00000100,
  1209. 0x3e78, 0x00000001, 0x00000002,
  1210. 0xc768, 0x00000008, 0x00000008,
  1211. 0x8c00, 0x000000ff, 0x00000003,
  1212. 0x214f8, 0x01ff01ff, 0x00000002,
  1213. 0x21498, 0x007ff800, 0x00200000,
  1214. 0x2015c, 0xffffffff, 0x00000f40,
  1215. 0x88c4, 0x001f3ae3, 0x00000082,
  1216. 0x88d4, 0x0000001f, 0x00000010,
  1217. 0x30934, 0xffffffff, 0x00000000
  1218. };
  1219. static const u32 kalindi_mgcg_cgcg_init[] =
  1220. {
  1221. 0xc420, 0xffffffff, 0xfffffffc,
  1222. 0x30800, 0xffffffff, 0xe0000000,
  1223. 0x3c2a0, 0xffffffff, 0x00000100,
  1224. 0x3c208, 0xffffffff, 0x00000100,
  1225. 0x3c2c0, 0xffffffff, 0x00000100,
  1226. 0x3c2c8, 0xffffffff, 0x00000100,
  1227. 0x3c2c4, 0xffffffff, 0x00000100,
  1228. 0x55e4, 0xffffffff, 0x00600100,
  1229. 0x3c280, 0xffffffff, 0x00000100,
  1230. 0x3c214, 0xffffffff, 0x06000100,
  1231. 0x3c220, 0xffffffff, 0x00000100,
  1232. 0x3c218, 0xffffffff, 0x06000100,
  1233. 0x3c204, 0xffffffff, 0x00000100,
  1234. 0x3c2e0, 0xffffffff, 0x00000100,
  1235. 0x3c224, 0xffffffff, 0x00000100,
  1236. 0x3c200, 0xffffffff, 0x00000100,
  1237. 0x3c230, 0xffffffff, 0x00000100,
  1238. 0x3c234, 0xffffffff, 0x00000100,
  1239. 0x3c250, 0xffffffff, 0x00000100,
  1240. 0x3c254, 0xffffffff, 0x00000100,
  1241. 0x3c258, 0xffffffff, 0x00000100,
  1242. 0x3c25c, 0xffffffff, 0x00000100,
  1243. 0x3c260, 0xffffffff, 0x00000100,
  1244. 0x3c27c, 0xffffffff, 0x00000100,
  1245. 0x3c278, 0xffffffff, 0x00000100,
  1246. 0x3c210, 0xffffffff, 0x06000100,
  1247. 0x3c290, 0xffffffff, 0x00000100,
  1248. 0x3c274, 0xffffffff, 0x00000100,
  1249. 0x3c2b4, 0xffffffff, 0x00000100,
  1250. 0x3c2b0, 0xffffffff, 0x00000100,
  1251. 0x3c270, 0xffffffff, 0x00000100,
  1252. 0x30800, 0xffffffff, 0xe0000000,
  1253. 0x3c020, 0xffffffff, 0x00010000,
  1254. 0x3c024, 0xffffffff, 0x00030002,
  1255. 0x3c028, 0xffffffff, 0x00040007,
  1256. 0x3c02c, 0xffffffff, 0x00060005,
  1257. 0x3c030, 0xffffffff, 0x00090008,
  1258. 0x3c034, 0xffffffff, 0x00010000,
  1259. 0x3c038, 0xffffffff, 0x00030002,
  1260. 0x3c03c, 0xffffffff, 0x00040007,
  1261. 0x3c040, 0xffffffff, 0x00060005,
  1262. 0x3c044, 0xffffffff, 0x00090008,
  1263. 0x3c000, 0xffffffff, 0x96e00200,
  1264. 0x8708, 0xffffffff, 0x00900100,
  1265. 0xc424, 0xffffffff, 0x0020003f,
  1266. 0x38, 0xffffffff, 0x0140001c,
  1267. 0x3c, 0x000f0000, 0x000f0000,
  1268. 0x220, 0xffffffff, 0xC060000C,
  1269. 0x224, 0xc0000fff, 0x00000100,
  1270. 0x20a8, 0xffffffff, 0x00000104,
  1271. 0x55e4, 0xff000fff, 0x00000100,
  1272. 0x30cc, 0xc0000fff, 0x00000104,
  1273. 0xc1e4, 0x00000001, 0x00000001,
  1274. 0xd00c, 0xff000ff0, 0x00000100,
  1275. 0xd80c, 0xff000ff0, 0x00000100
  1276. };
  1277. static const u32 hawaii_golden_spm_registers[] =
  1278. {
  1279. 0x30800, 0xe0ffffff, 0xe0000000
  1280. };
  1281. static const u32 hawaii_golden_common_registers[] =
  1282. {
  1283. 0x30800, 0xffffffff, 0xe0000000,
  1284. 0x28350, 0xffffffff, 0x3a00161a,
  1285. 0x28354, 0xffffffff, 0x0000002e,
  1286. 0x9a10, 0xffffffff, 0x00018208,
  1287. 0x98f8, 0xffffffff, 0x12011003
  1288. };
  1289. static const u32 hawaii_golden_registers[] =
  1290. {
  1291. 0x3354, 0x00000333, 0x00000333,
  1292. 0x9a10, 0x00010000, 0x00058208,
  1293. 0x9830, 0xffffffff, 0x00000000,
  1294. 0x9834, 0xf00fffff, 0x00000400,
  1295. 0x9838, 0x0002021c, 0x00020200,
  1296. 0xc78, 0x00000080, 0x00000000,
  1297. 0x5bb0, 0x000000f0, 0x00000070,
  1298. 0x5bc0, 0xf0311fff, 0x80300000,
  1299. 0x350c, 0x00810000, 0x408af000,
  1300. 0x7030, 0x31000111, 0x00000011,
  1301. 0x2f48, 0x73773777, 0x12010001,
  1302. 0x2120, 0x0000007f, 0x0000001b,
  1303. 0x21dc, 0x00007fb6, 0x00002191,
  1304. 0x3628, 0x0000003f, 0x0000000a,
  1305. 0x362c, 0x0000003f, 0x0000000a,
  1306. 0x2ae4, 0x00073ffe, 0x000022a2,
  1307. 0x240c, 0x000007ff, 0x00000000,
  1308. 0x8bf0, 0x00002001, 0x00000001,
  1309. 0x8b24, 0xffffffff, 0x00ffffff,
  1310. 0x30a04, 0x0000ff0f, 0x00000000,
  1311. 0x28a4c, 0x07ffffff, 0x06000000,
  1312. 0x3e78, 0x00000001, 0x00000002,
  1313. 0xc768, 0x00000008, 0x00000008,
  1314. 0xc770, 0x00000f00, 0x00000800,
  1315. 0xc774, 0x00000f00, 0x00000800,
  1316. 0xc798, 0x00ffffff, 0x00ff7fbf,
  1317. 0xc79c, 0x00ffffff, 0x00ff7faf,
  1318. 0x8c00, 0x000000ff, 0x00000800,
  1319. 0xe40, 0x00001fff, 0x00001fff,
  1320. 0x9060, 0x0000007f, 0x00000020,
  1321. 0x9508, 0x00010000, 0x00010000,
  1322. 0xae00, 0x00100000, 0x000ff07c,
  1323. 0xac14, 0x000003ff, 0x0000000f,
  1324. 0xac10, 0xffffffff, 0x7564fdec,
  1325. 0xac0c, 0xffffffff, 0x3120b9a8,
  1326. 0xac08, 0x20000000, 0x0f9c0000
  1327. };
  1328. static const u32 hawaii_mgcg_cgcg_init[] =
  1329. {
  1330. 0xc420, 0xffffffff, 0xfffffffd,
  1331. 0x30800, 0xffffffff, 0xe0000000,
  1332. 0x3c2a0, 0xffffffff, 0x00000100,
  1333. 0x3c208, 0xffffffff, 0x00000100,
  1334. 0x3c2c0, 0xffffffff, 0x00000100,
  1335. 0x3c2c8, 0xffffffff, 0x00000100,
  1336. 0x3c2c4, 0xffffffff, 0x00000100,
  1337. 0x55e4, 0xffffffff, 0x00200100,
  1338. 0x3c280, 0xffffffff, 0x00000100,
  1339. 0x3c214, 0xffffffff, 0x06000100,
  1340. 0x3c220, 0xffffffff, 0x00000100,
  1341. 0x3c218, 0xffffffff, 0x06000100,
  1342. 0x3c204, 0xffffffff, 0x00000100,
  1343. 0x3c2e0, 0xffffffff, 0x00000100,
  1344. 0x3c224, 0xffffffff, 0x00000100,
  1345. 0x3c200, 0xffffffff, 0x00000100,
  1346. 0x3c230, 0xffffffff, 0x00000100,
  1347. 0x3c234, 0xffffffff, 0x00000100,
  1348. 0x3c250, 0xffffffff, 0x00000100,
  1349. 0x3c254, 0xffffffff, 0x00000100,
  1350. 0x3c258, 0xffffffff, 0x00000100,
  1351. 0x3c25c, 0xffffffff, 0x00000100,
  1352. 0x3c260, 0xffffffff, 0x00000100,
  1353. 0x3c27c, 0xffffffff, 0x00000100,
  1354. 0x3c278, 0xffffffff, 0x00000100,
  1355. 0x3c210, 0xffffffff, 0x06000100,
  1356. 0x3c290, 0xffffffff, 0x00000100,
  1357. 0x3c274, 0xffffffff, 0x00000100,
  1358. 0x3c2b4, 0xffffffff, 0x00000100,
  1359. 0x3c2b0, 0xffffffff, 0x00000100,
  1360. 0x3c270, 0xffffffff, 0x00000100,
  1361. 0x30800, 0xffffffff, 0xe0000000,
  1362. 0x3c020, 0xffffffff, 0x00010000,
  1363. 0x3c024, 0xffffffff, 0x00030002,
  1364. 0x3c028, 0xffffffff, 0x00040007,
  1365. 0x3c02c, 0xffffffff, 0x00060005,
  1366. 0x3c030, 0xffffffff, 0x00090008,
  1367. 0x3c034, 0xffffffff, 0x00010000,
  1368. 0x3c038, 0xffffffff, 0x00030002,
  1369. 0x3c03c, 0xffffffff, 0x00040007,
  1370. 0x3c040, 0xffffffff, 0x00060005,
  1371. 0x3c044, 0xffffffff, 0x00090008,
  1372. 0x3c048, 0xffffffff, 0x00010000,
  1373. 0x3c04c, 0xffffffff, 0x00030002,
  1374. 0x3c050, 0xffffffff, 0x00040007,
  1375. 0x3c054, 0xffffffff, 0x00060005,
  1376. 0x3c058, 0xffffffff, 0x00090008,
  1377. 0x3c05c, 0xffffffff, 0x00010000,
  1378. 0x3c060, 0xffffffff, 0x00030002,
  1379. 0x3c064, 0xffffffff, 0x00040007,
  1380. 0x3c068, 0xffffffff, 0x00060005,
  1381. 0x3c06c, 0xffffffff, 0x00090008,
  1382. 0x3c070, 0xffffffff, 0x00010000,
  1383. 0x3c074, 0xffffffff, 0x00030002,
  1384. 0x3c078, 0xffffffff, 0x00040007,
  1385. 0x3c07c, 0xffffffff, 0x00060005,
  1386. 0x3c080, 0xffffffff, 0x00090008,
  1387. 0x3c084, 0xffffffff, 0x00010000,
  1388. 0x3c088, 0xffffffff, 0x00030002,
  1389. 0x3c08c, 0xffffffff, 0x00040007,
  1390. 0x3c090, 0xffffffff, 0x00060005,
  1391. 0x3c094, 0xffffffff, 0x00090008,
  1392. 0x3c098, 0xffffffff, 0x00010000,
  1393. 0x3c09c, 0xffffffff, 0x00030002,
  1394. 0x3c0a0, 0xffffffff, 0x00040007,
  1395. 0x3c0a4, 0xffffffff, 0x00060005,
  1396. 0x3c0a8, 0xffffffff, 0x00090008,
  1397. 0x3c0ac, 0xffffffff, 0x00010000,
  1398. 0x3c0b0, 0xffffffff, 0x00030002,
  1399. 0x3c0b4, 0xffffffff, 0x00040007,
  1400. 0x3c0b8, 0xffffffff, 0x00060005,
  1401. 0x3c0bc, 0xffffffff, 0x00090008,
  1402. 0x3c0c0, 0xffffffff, 0x00010000,
  1403. 0x3c0c4, 0xffffffff, 0x00030002,
  1404. 0x3c0c8, 0xffffffff, 0x00040007,
  1405. 0x3c0cc, 0xffffffff, 0x00060005,
  1406. 0x3c0d0, 0xffffffff, 0x00090008,
  1407. 0x3c0d4, 0xffffffff, 0x00010000,
  1408. 0x3c0d8, 0xffffffff, 0x00030002,
  1409. 0x3c0dc, 0xffffffff, 0x00040007,
  1410. 0x3c0e0, 0xffffffff, 0x00060005,
  1411. 0x3c0e4, 0xffffffff, 0x00090008,
  1412. 0x3c0e8, 0xffffffff, 0x00010000,
  1413. 0x3c0ec, 0xffffffff, 0x00030002,
  1414. 0x3c0f0, 0xffffffff, 0x00040007,
  1415. 0x3c0f4, 0xffffffff, 0x00060005,
  1416. 0x3c0f8, 0xffffffff, 0x00090008,
  1417. 0xc318, 0xffffffff, 0x00020200,
  1418. 0x3350, 0xffffffff, 0x00000200,
  1419. 0x15c0, 0xffffffff, 0x00000400,
  1420. 0x55e8, 0xffffffff, 0x00000000,
  1421. 0x2f50, 0xffffffff, 0x00000902,
  1422. 0x3c000, 0xffffffff, 0x96940200,
  1423. 0x8708, 0xffffffff, 0x00900100,
  1424. 0xc424, 0xffffffff, 0x0020003f,
  1425. 0x38, 0xffffffff, 0x0140001c,
  1426. 0x3c, 0x000f0000, 0x000f0000,
  1427. 0x220, 0xffffffff, 0xc060000c,
  1428. 0x224, 0xc0000fff, 0x00000100,
  1429. 0xf90, 0xffffffff, 0x00000100,
  1430. 0xf98, 0x00000101, 0x00000000,
  1431. 0x20a8, 0xffffffff, 0x00000104,
  1432. 0x55e4, 0xff000fff, 0x00000100,
  1433. 0x30cc, 0xc0000fff, 0x00000104,
  1434. 0xc1e4, 0x00000001, 0x00000001,
  1435. 0xd00c, 0xff000ff0, 0x00000100,
  1436. 0xd80c, 0xff000ff0, 0x00000100
  1437. };
static void cik_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	case CHIP_HAWAII:
		radeon_program_register_sequence(rdev,
						 hawaii_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_common_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_spm_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
		break;
	default:
		break;
	}
}
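
/*
 * Editorial note on the "golden" tables above: each table is a flat array
 * of (register offset, AND mask, OR value) triplets, consumed three entries
 * at a time by radeon_program_register_sequence(). A rough sketch of what
 * that helper does (an illustration under that assumption, not the driver's
 * actual implementation; see radeon_device.c for the real code):
 *
 *	for (i = 0; i + 2 < array_size; i += 3) {
 *		reg = registers[i + 0];
 *		and_mask = registers[i + 1];
 *		or_mask = registers[i + 2];
 *		tmp = (and_mask == 0xffffffff) ? or_mask :
 *			((RREG32(reg) & ~and_mask) | or_mask);
 *		WREG32(reg, tmp);
 *	}
 *
 * So a mask of 0xffffffff means "write the value verbatim", while a
 * narrower mask only updates the masked bits of the register.
 */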
/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}
/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 *
 * Returns the value in the doorbell aperture at the
 * requested offset (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
{
	if (offset < rdev->doorbell.size) {
		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
		return 0;
	}
}
/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested offset (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
{
	if (offset < rdev->doorbell.size) {
		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
	}
}
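
/*
 * Usage sketch for the doorbell helpers above (hypothetical caller and
 * field names, for illustration only): a ring doorbell is rung by writing
 * the ring's write pointer to that ring's byte offset in the doorbell
 * aperture, and can be read back the same way, e.g.
 *
 *	cik_mm_wdoorbell(rdev, ring_doorbell_offset, wptr);
 *	wptr = cik_mm_rdoorbell(rdev, ring_doorbell_offset);
 *
 * Offsets are byte offsets and are bounds-checked against
 * rdev->doorbell.size, so an out-of-range access only logs a DRM error.
 */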
#define BONAIRE_IO_MC_REGS_SIZE 36

static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};
#define HAWAII_IO_MC_REGS_SIZE 22

static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
{
	{0x0000007d, 0x40000000},
	{0x0000007e, 0x40180304},
	{0x0000007f, 0x0000ff00},
	{0x00000081, 0x00000000},
	{0x00000083, 0x00000800},
	{0x00000086, 0x00000000},
	{0x00000087, 0x00000100},
	{0x00000088, 0x00020100},
	{0x00000089, 0x00000000},
	{0x0000008b, 0x00040000},
	{0x0000008c, 0x00000100},
	{0x0000008e, 0xff010000},
	{0x00000090, 0xffffefff},
	{0x00000091, 0xfff3efff},
	{0x00000092, 0xfff3efbf},
	{0x00000093, 0xf7ffffff},
	{0x00000094, 0xffffff7f},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x0000009f, 0x00c79000}
};
/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}
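
/*
 * Editorial note: cik_srbm_select() only packs the me/pipe/queue/vmid
 * fields into SRBM_GFX_CNTL. Callers are expected to switch to the
 * instance they need, program the instanced registers, and then switch
 * back to the default instance, typically under appropriate locking.
 * A hedged sketch of that pattern (illustrative only, not a specific
 * call site in this file):
 *
 *	cik_srbm_select(rdev, me, pipe, queue, 0);
 *	... program the per-queue or per-VMID registers ...
 *	cik_srbm_select(rdev, 0, 0, 0, 0);
 */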
/* ucode loading */
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
		ucode_size = CIK_MC_UCODE_SIZE;
		regs_size = BONAIRE_IO_MC_REGS_SIZE;
		break;
	case CHIP_HAWAII:
		io_mc_regs = (u32 *)&hawaii_io_mc_regs;
		ucode_size = HAWAII_MC_UCODE_SIZE;
		regs_size = HAWAII_IO_MC_REGS_SIZE;
		break;
	default:
		return -EINVAL;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
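
/*
 * Editorial note on the io_mc_regs tables consumed above: each table is an
 * array of {index, data} pairs, and the "load mc io regs" loop writes the
 * index to MC_SEQ_IO_DEBUG_INDEX and the data to MC_SEQ_IO_DEBUG_DATA.
 * Because the [N][2] tables are cast to a flat u32 pointer, the (i << 1)
 * and (i << 1) + 1 indexing in the loop is equivalent to the clearer
 * two-dimensional form (sketch only):
 *
 *	WREG32(MC_SEQ_IO_DEBUG_INDEX, bonaire_io_mc_regs[i][0]);
 *	WREG32(MC_SEQ_IO_DEBUG_DATA, bonaire_io_mc_regs[i][1]);
 */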
/**
 * cik_init_microcode - load ucode images from disk
 *
 * @rdev: radeon_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size,
		mec_req_size, rlc_req_size, mc_req_size = 0,
		sdma_req_size, smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BONAIRE:
		chip_name = "BONAIRE";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = CIK_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_HAWAII:
		chip_name = "HAWAII";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_KAVERI:
		chip_name = "KAVERI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KV_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	case CHIP_KABINI:
		chip_name = "KABINI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->ce_fw->size != ce_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->ce_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->mec_fw->size != mec_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mec_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
	err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->sdma_fw->size != sdma_req_size) {
		printk(KERN_ERR
		       "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
		       rdev->sdma_fw->size, fw_name);
		err = -EINVAL;
	}

	/* No SMC, MC ucode on APUs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "cik_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "cik_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "cik_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->ce_fw);
		rdev->ce_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}
  1886. /*
  1887. * Core functions
  1888. */
  1889. /**
  1890. * cik_tiling_mode_table_init - init the hw tiling table
  1891. *
  1892. * @rdev: radeon_device pointer
  1893. *
  1894. * Starting with SI, the tiling setup is done globally in a
  1895. * set of 32 tiling modes. Rather than selecting each set of
  1896. * parameters per surface as on older asics, we just select
  1897. * which index in the tiling table we want to use, and the
  1898. * surface uses those parameters (CIK).
  1899. */
  1900. static void cik_tiling_mode_table_init(struct radeon_device *rdev)
  1901. {
  1902. const u32 num_tile_mode_states = 32;
  1903. const u32 num_secondary_tile_mode_states = 16;
  1904. u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
  1905. u32 num_pipe_configs;
  1906. u32 num_rbs = rdev->config.cik.max_backends_per_se *
  1907. rdev->config.cik.max_shader_engines;
  1908. switch (rdev->config.cik.mem_row_size_in_kb) {
  1909. case 1:
  1910. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
  1911. break;
  1912. case 2:
  1913. default:
  1914. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
  1915. break;
  1916. case 4:
  1917. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
  1918. break;
  1919. }
  1920. num_pipe_configs = rdev->config.cik.max_tile_pipes;
  1921. if (num_pipe_configs > 8)
  1922. num_pipe_configs = 16;
  1923. if (num_pipe_configs == 16) {
  1924. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  1925. switch (reg_offset) {
  1926. case 0:
  1927. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1928. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1929. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1930. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  1931. break;
  1932. case 1:
  1933. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1934. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1935. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1936. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  1937. break;
  1938. case 2:
  1939. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1940. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1941. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1942. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1943. break;
  1944. case 3:
  1945. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1946. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1947. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1948. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  1949. break;
  1950. case 4:
  1951. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1952. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1953. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1954. TILE_SPLIT(split_equal_to_row_size));
  1955. break;
  1956. case 5:
  1957. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1958. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1959. break;
  1960. case 6:
  1961. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1962. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1963. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1964. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  1965. break;
  1966. case 7:
  1967. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1968. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1969. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1970. TILE_SPLIT(split_equal_to_row_size));
  1971. break;
  1972. case 8:
  1973. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1974. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
  1975. break;
  1976. case 9:
  1977. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1978. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1979. break;
  1980. case 10:
  1981. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1982. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1983. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1984. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1985. break;
  1986. case 11:
  1987. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1988. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1989. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  1990. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1991. break;
  1992. case 12:
  1993. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1994. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1995. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1996. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1997. break;
  1998. case 13:
  1999. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2000. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2001. break;
  2002. case 14:
  2003. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2004. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2005. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2006. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2007. break;
  2008. case 16:
  2009. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2010. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2011. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  2012. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2013. break;
  2014. case 17:
  2015. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2016. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2017. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2018. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2019. break;
  2020. case 27:
  2021. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2022. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2023. break;
  2024. case 28:
  2025. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2026. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2027. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2028. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2029. break;
  2030. case 29:
  2031. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2032. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2033. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  2034. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2035. break;
  2036. case 30:
  2037. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2038. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2039. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2040. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2041. break;
  2042. default:
  2043. gb_tile_moden = 0;
  2044. break;
  2045. }
  2046. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2047. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2048. }
  2049. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  2050. switch (reg_offset) {
  2051. case 0:
  2052. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2053. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2054. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2055. NUM_BANKS(ADDR_SURF_16_BANK));
  2056. break;
  2057. case 1:
  2058. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2059. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2060. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2061. NUM_BANKS(ADDR_SURF_16_BANK));
  2062. break;
  2063. case 2:
  2064. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2065. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2066. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2067. NUM_BANKS(ADDR_SURF_16_BANK));
  2068. break;
  2069. case 3:
  2070. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2071. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2072. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2073. NUM_BANKS(ADDR_SURF_16_BANK));
  2074. break;
  2075. case 4:
  2076. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2077. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2078. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2079. NUM_BANKS(ADDR_SURF_8_BANK));
  2080. break;
  2081. case 5:
  2082. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2083. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2084. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2085. NUM_BANKS(ADDR_SURF_4_BANK));
  2086. break;
  2087. case 6:
  2088. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2089. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2090. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2091. NUM_BANKS(ADDR_SURF_2_BANK));
  2092. break;
  2093. case 8:
  2094. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2095. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2096. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2097. NUM_BANKS(ADDR_SURF_16_BANK));
  2098. break;
  2099. case 9:
  2100. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2101. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2102. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2103. NUM_BANKS(ADDR_SURF_16_BANK));
  2104. break;
  2105. case 10:
  2106. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2107. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2108. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2109. NUM_BANKS(ADDR_SURF_16_BANK));
  2110. break;
  2111. case 11:
  2112. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2113. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2114. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2115. NUM_BANKS(ADDR_SURF_8_BANK));
  2116. break;
  2117. case 12:
  2118. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2119. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2120. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2121. NUM_BANKS(ADDR_SURF_4_BANK));
  2122. break;
  2123. case 13:
  2124. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2125. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2126. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2127. NUM_BANKS(ADDR_SURF_2_BANK));
  2128. break;
  2129. case 14:
  2130. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2131. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2132. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2133. NUM_BANKS(ADDR_SURF_2_BANK));
  2134. break;
  2135. default:
  2136. gb_tile_moden = 0;
  2137. break;
  2138. }
  2139. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2140. }
  2141. } else if (num_pipe_configs == 8) {
  2142. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  2143. switch (reg_offset) {
  2144. case 0:
  2145. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2146. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2147. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2148. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2149. break;
  2150. case 1:
  2151. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2152. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2153. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2154. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2155. break;
  2156. case 2:
  2157. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2158. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2159. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2160. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2161. break;
  2162. case 3:
  2163. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2164. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2165. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2166. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2167. break;
  2168. case 4:
  2169. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2170. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2171. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2172. TILE_SPLIT(split_equal_to_row_size));
  2173. break;
  2174. case 5:
  2175. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2176. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2177. break;
  2178. case 6:
  2179. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2180. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2181. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2182. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2183. break;
  2184. case 7:
  2185. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2186. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2187. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2188. TILE_SPLIT(split_equal_to_row_size));
  2189. break;
  2190. case 8:
  2191. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2192. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
  2193. break;
  2194. case 9:
  2195. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2196. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2197. break;
  2198. case 10:
  2199. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2200. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2201. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2202. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2203. break;
  2204. case 11:
  2205. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2206. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2207. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2208. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2209. break;
  2210. case 12:
  2211. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2212. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2213. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2214. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2215. break;
  2216. case 13:
  2217. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2218. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2219. break;
  2220. case 14:
  2221. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2222. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2223. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2224. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2225. break;
  2226. case 16:
  2227. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2228. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2229. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2230. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2231. break;
  2232. case 17:
  2233. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2234. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2235. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2236. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2237. break;
  2238. case 27:
  2239. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2240. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2241. break;
  2242. case 28:
  2243. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2244. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2245. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2246. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2247. break;
  2248. case 29:
  2249. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2250. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2251. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2252. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2253. break;
  2254. case 30:
  2255. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2256. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2257. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2258. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2259. break;
  2260. default:
  2261. gb_tile_moden = 0;
  2262. break;
  2263. }
  2264. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2265. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2266. }
  2267. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  2268. switch (reg_offset) {
  2269. case 0:
  2270. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2271. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2272. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2273. NUM_BANKS(ADDR_SURF_16_BANK));
  2274. break;
  2275. case 1:
  2276. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2277. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2278. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2279. NUM_BANKS(ADDR_SURF_16_BANK));
  2280. break;
  2281. case 2:
  2282. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2283. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2284. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2285. NUM_BANKS(ADDR_SURF_16_BANK));
  2286. break;
  2287. case 3:
  2288. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2289. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2290. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2291. NUM_BANKS(ADDR_SURF_16_BANK));
  2292. break;
  2293. case 4:
  2294. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2295. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2296. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2297. NUM_BANKS(ADDR_SURF_8_BANK));
  2298. break;
  2299. case 5:
  2300. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2301. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2302. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2303. NUM_BANKS(ADDR_SURF_4_BANK));
  2304. break;
  2305. case 6:
  2306. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2307. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2308. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2309. NUM_BANKS(ADDR_SURF_2_BANK));
  2310. break;
  2311. case 8:
  2312. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2313. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2314. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2315. NUM_BANKS(ADDR_SURF_16_BANK));
  2316. break;
  2317. case 9:
  2318. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2319. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2320. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2321. NUM_BANKS(ADDR_SURF_16_BANK));
  2322. break;
  2323. case 10:
  2324. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2325. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2326. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2327. NUM_BANKS(ADDR_SURF_16_BANK));
  2328. break;
  2329. case 11:
  2330. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2331. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2332. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2333. NUM_BANKS(ADDR_SURF_16_BANK));
  2334. break;
  2335. case 12:
  2336. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2337. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2338. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2339. NUM_BANKS(ADDR_SURF_8_BANK));
  2340. break;
  2341. case 13:
  2342. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2343. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2344. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2345. NUM_BANKS(ADDR_SURF_4_BANK));
  2346. break;
  2347. case 14:
  2348. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2349. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2350. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2351. NUM_BANKS(ADDR_SURF_2_BANK));
  2352. break;
  2353. default:
  2354. gb_tile_moden = 0;
  2355. break;
  2356. }
  2357. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2358. }
  2359. } else if (num_pipe_configs == 4) {
  2360. if (num_rbs == 4) {
  2361. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  2362. switch (reg_offset) {
  2363. case 0:
  2364. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2365. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2366. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2367. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2368. break;
  2369. case 1:
  2370. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2371. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2372. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2373. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2374. break;
  2375. case 2:
  2376. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2377. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2378. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2379. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2380. break;
  2381. case 3:
  2382. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2383. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2384. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2385. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2386. break;
  2387. case 4:
  2388. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2389. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2390. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2391. TILE_SPLIT(split_equal_to_row_size));
  2392. break;
  2393. case 5:
  2394. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2395. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2396. break;
  2397. case 6:
  2398. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2399. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2400. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2401. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2402. break;
  2403. case 7:
  2404. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2405. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2406. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2407. TILE_SPLIT(split_equal_to_row_size));
  2408. break;
  2409. case 8:
  2410. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2411. PIPE_CONFIG(ADDR_SURF_P4_16x16));
  2412. break;
  2413. case 9:
  2414. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2415. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2416. break;
  2417. case 10:
  2418. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2419. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2420. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2421. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2422. break;
  2423. case 11:
  2424. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2425. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2426. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2427. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2428. break;
  2429. case 12:
  2430. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2431. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2432. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2433. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2434. break;
  2435. case 13:
  2436. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2437. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2438. break;
  2439. case 14:
  2440. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2441. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2442. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2443. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2444. break;
  2445. case 16:
  2446. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2447. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2448. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2449. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2450. break;
  2451. case 17:
  2452. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2453. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2454. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2455. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2456. break;
  2457. case 27:
  2458. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2459. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2460. break;
  2461. case 28:
  2462. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2463. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2464. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2465. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2466. break;
  2467. case 29:
  2468. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2469. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2470. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2471. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2472. break;
  2473. case 30:
  2474. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2475. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2476. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2477. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2478. break;
  2479. default:
  2480. gb_tile_moden = 0;
  2481. break;
  2482. }
  2483. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2484. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2485. }
  2486. } else if (num_rbs < 4) {
  2487. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  2488. switch (reg_offset) {
  2489. case 0:
  2490. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2491. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2492. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2493. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2494. break;
  2495. case 1:
  2496. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2497. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2498. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2499. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2500. break;
  2501. case 2:
  2502. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2503. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2504. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2505. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2506. break;
  2507. case 3:
  2508. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2509. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2510. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2511. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2512. break;
  2513. case 4:
  2514. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2515. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2516. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2517. TILE_SPLIT(split_equal_to_row_size));
  2518. break;
  2519. case 5:
  2520. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2521. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2522. break;
  2523. case 6:
  2524. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2525. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2526. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2527. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2528. break;
  2529. case 7:
  2530. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2531. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2532. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2533. TILE_SPLIT(split_equal_to_row_size));
  2534. break;
  2535. case 8:
  2536. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2537. PIPE_CONFIG(ADDR_SURF_P4_8x16));
  2538. break;
  2539. case 9:
  2540. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2541. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2542. break;
  2543. case 10:
  2544. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2545. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2546. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2547. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2548. break;
  2549. case 11:
  2550. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2551. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2552. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2553. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2554. break;
  2555. case 12:
  2556. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2557. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2558. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2559. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2560. break;
  2561. case 13:
  2562. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2563. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2564. break;
  2565. case 14:
  2566. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2567. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2568. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2569. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2570. break;
  2571. case 16:
  2572. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2573. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2574. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2575. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2576. break;
  2577. case 17:
  2578. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2579. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2580. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2581. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2582. break;
  2583. case 27:
  2584. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2585. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2586. break;
  2587. case 28:
  2588. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2589. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2590. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2591. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2592. break;
  2593. case 29:
  2594. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2595. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2596. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2597. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2598. break;
  2599. case 30:
  2600. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2601. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2602. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2603. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2604. break;
  2605. default:
  2606. gb_tile_moden = 0;
  2607. break;
  2608. }
  2609. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2610. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2611. }
  2612. }
  2613. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  2614. switch (reg_offset) {
  2615. case 0:
  2616. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2617. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2618. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2619. NUM_BANKS(ADDR_SURF_16_BANK));
  2620. break;
  2621. case 1:
  2622. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2623. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2624. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2625. NUM_BANKS(ADDR_SURF_16_BANK));
  2626. break;
  2627. case 2:
  2628. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2629. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2630. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2631. NUM_BANKS(ADDR_SURF_16_BANK));
  2632. break;
  2633. case 3:
  2634. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2635. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2636. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2637. NUM_BANKS(ADDR_SURF_16_BANK));
  2638. break;
  2639. case 4:
  2640. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2641. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2642. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2643. NUM_BANKS(ADDR_SURF_16_BANK));
  2644. break;
  2645. case 5:
  2646. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2647. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2648. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2649. NUM_BANKS(ADDR_SURF_8_BANK));
  2650. break;
  2651. case 6:
  2652. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2653. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2654. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2655. NUM_BANKS(ADDR_SURF_4_BANK));
  2656. break;
  2657. case 8:
  2658. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2659. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2660. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2661. NUM_BANKS(ADDR_SURF_16_BANK));
  2662. break;
  2663. case 9:
  2664. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2665. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2666. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2667. NUM_BANKS(ADDR_SURF_16_BANK));
  2668. break;
  2669. case 10:
  2670. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2671. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2672. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2673. NUM_BANKS(ADDR_SURF_16_BANK));
  2674. break;
  2675. case 11:
  2676. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2677. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2678. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2679. NUM_BANKS(ADDR_SURF_16_BANK));
  2680. break;
  2681. case 12:
  2682. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2683. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2684. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2685. NUM_BANKS(ADDR_SURF_16_BANK));
  2686. break;
  2687. case 13:
  2688. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2689. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2690. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2691. NUM_BANKS(ADDR_SURF_8_BANK));
  2692. break;
  2693. case 14:
  2694. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2695. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2696. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2697. NUM_BANKS(ADDR_SURF_4_BANK));
  2698. break;
  2699. default:
  2700. gb_tile_moden = 0;
  2701. break;
  2702. }
  2703. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2704. }
  2705. } else if (num_pipe_configs == 2) {
  2706. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  2707. switch (reg_offset) {
  2708. case 0:
  2709. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2710. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2711. PIPE_CONFIG(ADDR_SURF_P2) |
  2712. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2713. break;
  2714. case 1:
  2715. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2716. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2717. PIPE_CONFIG(ADDR_SURF_P2) |
  2718. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2719. break;
  2720. case 2:
  2721. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2722. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2723. PIPE_CONFIG(ADDR_SURF_P2) |
  2724. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2725. break;
  2726. case 3:
  2727. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2728. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2729. PIPE_CONFIG(ADDR_SURF_P2) |
  2730. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2731. break;
  2732. case 4:
  2733. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2734. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2735. PIPE_CONFIG(ADDR_SURF_P2) |
  2736. TILE_SPLIT(split_equal_to_row_size));
  2737. break;
  2738. case 5:
  2739. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2740. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2741. break;
  2742. case 6:
  2743. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2744. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2745. PIPE_CONFIG(ADDR_SURF_P2) |
  2746. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2747. break;
  2748. case 7:
  2749. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2750. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2751. PIPE_CONFIG(ADDR_SURF_P2) |
  2752. TILE_SPLIT(split_equal_to_row_size));
  2753. break;
  2754. case 8:
  2755. gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
  2756. break;
  2757. case 9:
  2758. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2759. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2760. break;
  2761. case 10:
  2762. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2763. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2764. PIPE_CONFIG(ADDR_SURF_P2) |
  2765. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2766. break;
  2767. case 11:
  2768. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2769. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2770. PIPE_CONFIG(ADDR_SURF_P2) |
  2771. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2772. break;
  2773. case 12:
  2774. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2775. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2776. PIPE_CONFIG(ADDR_SURF_P2) |
  2777. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2778. break;
  2779. case 13:
  2780. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2781. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2782. break;
  2783. case 14:
  2784. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2785. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2786. PIPE_CONFIG(ADDR_SURF_P2) |
  2787. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2788. break;
  2789. case 16:
  2790. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2791. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2792. PIPE_CONFIG(ADDR_SURF_P2) |
  2793. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2794. break;
  2795. case 17:
  2796. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2797. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2798. PIPE_CONFIG(ADDR_SURF_P2) |
  2799. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2800. break;
  2801. case 27:
  2802. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2803. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2804. break;
  2805. case 28:
  2806. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2807. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2808. PIPE_CONFIG(ADDR_SURF_P2) |
  2809. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2810. break;
  2811. case 29:
  2812. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2813. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2814. PIPE_CONFIG(ADDR_SURF_P2) |
  2815. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2816. break;
  2817. case 30:
  2818. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2819. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2820. PIPE_CONFIG(ADDR_SURF_P2) |
  2821. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2822. break;
  2823. default:
  2824. gb_tile_moden = 0;
  2825. break;
  2826. }
  2827. rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
  2828. WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2829. }
  2830. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  2831. switch (reg_offset) {
  2832. case 0:
  2833. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2834. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2835. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2836. NUM_BANKS(ADDR_SURF_16_BANK));
  2837. break;
  2838. case 1:
  2839. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2840. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2841. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2842. NUM_BANKS(ADDR_SURF_16_BANK));
  2843. break;
  2844. case 2:
  2845. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2846. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2847. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2848. NUM_BANKS(ADDR_SURF_16_BANK));
  2849. break;
  2850. case 3:
  2851. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2852. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2853. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2854. NUM_BANKS(ADDR_SURF_16_BANK));
  2855. break;
  2856. case 4:
  2857. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2858. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2859. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2860. NUM_BANKS(ADDR_SURF_16_BANK));
  2861. break;
  2862. case 5:
  2863. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2864. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2865. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2866. NUM_BANKS(ADDR_SURF_16_BANK));
  2867. break;
  2868. case 6:
  2869. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2870. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2871. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2872. NUM_BANKS(ADDR_SURF_8_BANK));
  2873. break;
  2874. case 8:
  2875. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2876. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2877. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2878. NUM_BANKS(ADDR_SURF_16_BANK));
  2879. break;
  2880. case 9:
  2881. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2882. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2883. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2884. NUM_BANKS(ADDR_SURF_16_BANK));
  2885. break;
  2886. case 10:
  2887. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2888. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2889. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2890. NUM_BANKS(ADDR_SURF_16_BANK));
  2891. break;
  2892. case 11:
  2893. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2894. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2895. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2896. NUM_BANKS(ADDR_SURF_16_BANK));
  2897. break;
  2898. case 12:
  2899. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2900. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2901. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2902. NUM_BANKS(ADDR_SURF_16_BANK));
  2903. break;
  2904. case 13:
  2905. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2906. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2907. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2908. NUM_BANKS(ADDR_SURF_16_BANK));
  2909. break;
  2910. case 14:
  2911. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2912. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2913. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2914. NUM_BANKS(ADDR_SURF_8_BANK));
  2915. break;
  2916. default:
  2917. gb_tile_moden = 0;
  2918. break;
  2919. }
  2920. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
  2921. }
  2922. } else
  2923. DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
  2924. }
  2925. /**
  2926. * cik_select_se_sh - select which SE, SH to address
  2927. *
  2928. * @rdev: radeon_device pointer
  2929. * @se_num: shader engine to address
  2930. * @sh_num: sh block to address
  2931. *
  2932. * Select which SE, SH combinations to address. Certain
  2933. * registers are instanced per SE or SH. 0xffffffff means
  2934. * broadcast to all SEs or SHs (CIK).
  2935. */
  2936. static void cik_select_se_sh(struct radeon_device *rdev,
  2937. u32 se_num, u32 sh_num)
  2938. {
  2939. u32 data = INSTANCE_BROADCAST_WRITES;
  2940. if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
  2941. data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
  2942. else if (se_num == 0xffffffff)
  2943. data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
  2944. else if (sh_num == 0xffffffff)
  2945. data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
  2946. else
  2947. data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
  2948. WREG32(GRBM_GFX_INDEX, data);
  2949. }
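/* Illustrative usage: callers bracket instanced register accesses with a
 * broadcast restore, e.g. cik_select_se_sh(rdev, i, j); ... ;
 * cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); as cik_setup_rb() does below. */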
  2950. /**
  2951. * cik_create_bitmask - create a bitmask
  2952. *
  2953. * @bit_width: length of the mask
  2954. *
  2955. * create a variable length bit mask (CIK).
  2956. * Returns the bitmask.
  2957. */
  2958. static u32 cik_create_bitmask(u32 bit_width)
  2959. {
  2960. u32 i, mask = 0;
  2961. for (i = 0; i < bit_width; i++) {
  2962. mask <<= 1;
  2963. mask |= 1;
  2964. }
  2965. return mask;
  2966. }
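/* Worked example: cik_create_bitmask(4) == 0xf. The loop is equivalent to
 * (1U << bit_width) - 1 for bit_width < 32, but avoids the undefined shift
 * when bit_width == 32. */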
  2967. /**
2968. * cik_get_rb_disabled - get the mask of disabled render backends (RBs)
  2969. *
  2970. * @rdev: radeon_device pointer
  2971. * @max_rb_num: max RBs (render backends) for the asic
  2972. * @se_num: number of SEs (shader engines) for the asic
  2973. * @sh_per_se: number of SH blocks per SE for the asic
  2974. *
  2975. * Calculates the bitmask of disabled RBs (CIK).
  2976. * Returns the disabled RB bitmask.
  2977. */
  2978. static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  2979. u32 max_rb_num, u32 se_num,
  2980. u32 sh_per_se)
  2981. {
  2982. u32 data, mask;
  2983. data = RREG32(CC_RB_BACKEND_DISABLE);
  2984. if (data & 1)
  2985. data &= BACKEND_DISABLE_MASK;
  2986. else
  2987. data = 0;
  2988. data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
  2989. data >>= BACKEND_DISABLE_SHIFT;
  2990. mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
  2991. return data & mask;
  2992. }
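/* The mask width is the number of RBs owned by a single SH: max_rb_num spread
 * evenly over se_num shader engines and sh_per_se SH blocks per engine. */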
  2993. /**
  2994. * cik_setup_rb - setup the RBs on the asic
  2995. *
  2996. * @rdev: radeon_device pointer
  2997. * @se_num: number of SEs (shader engines) for the asic
  2998. * @sh_per_se: number of SH blocks per SE for the asic
  2999. * @max_rb_num: max RBs (render backends) for the asic
  3000. *
  3001. * Configures per-SE/SH RB registers (CIK).
  3002. */
  3003. static void cik_setup_rb(struct radeon_device *rdev,
  3004. u32 se_num, u32 sh_per_se,
  3005. u32 max_rb_num)
  3006. {
  3007. int i, j;
  3008. u32 data, mask;
  3009. u32 disabled_rbs = 0;
  3010. u32 enabled_rbs = 0;
  3011. for (i = 0; i < se_num; i++) {
  3012. for (j = 0; j < sh_per_se; j++) {
  3013. cik_select_se_sh(rdev, i, j);
  3014. data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
  3015. if (rdev->family == CHIP_HAWAII)
  3016. disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
  3017. else
  3018. disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
  3019. }
  3020. }
  3021. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  3022. mask = 1;
  3023. for (i = 0; i < max_rb_num; i++) {
  3024. if (!(disabled_rbs & mask))
  3025. enabled_rbs |= mask;
  3026. mask <<= 1;
  3027. }
  3028. for (i = 0; i < se_num; i++) {
  3029. cik_select_se_sh(rdev, i, 0xffffffff);
  3030. data = 0;
  3031. for (j = 0; j < sh_per_se; j++) {
  3032. switch (enabled_rbs & 3) {
  3033. case 0:
  3034. if (j == 0)
  3035. data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
  3036. else
  3037. data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
  3038. break;
  3039. case 1:
  3040. data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
  3041. break;
  3042. case 2:
  3043. data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
  3044. break;
  3045. case 3:
  3046. default:
  3047. data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
  3048. break;
  3049. }
  3050. enabled_rbs >>= 2;
  3051. }
  3052. WREG32(PA_SC_RASTER_CONFIG, data);
  3053. }
  3054. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  3055. }
  3056. /**
  3057. * cik_gpu_init - setup the 3D engine
  3058. *
  3059. * @rdev: radeon_device pointer
  3060. *
  3061. * Configures the 3D engine and tiling configuration
  3062. * registers so that the 3D engine is usable.
  3063. */
  3064. static void cik_gpu_init(struct radeon_device *rdev)
  3065. {
  3066. u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
  3067. u32 mc_shared_chmap, mc_arb_ramcfg;
  3068. u32 hdp_host_path_cntl;
  3069. u32 tmp;
  3070. int i, j;
  3071. switch (rdev->family) {
  3072. case CHIP_BONAIRE:
  3073. rdev->config.cik.max_shader_engines = 2;
  3074. rdev->config.cik.max_tile_pipes = 4;
  3075. rdev->config.cik.max_cu_per_sh = 7;
  3076. rdev->config.cik.max_sh_per_se = 1;
  3077. rdev->config.cik.max_backends_per_se = 2;
  3078. rdev->config.cik.max_texture_channel_caches = 4;
  3079. rdev->config.cik.max_gprs = 256;
  3080. rdev->config.cik.max_gs_threads = 32;
  3081. rdev->config.cik.max_hw_contexts = 8;
  3082. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3083. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3084. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3085. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3086. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3087. break;
  3088. case CHIP_HAWAII:
  3089. rdev->config.cik.max_shader_engines = 4;
  3090. rdev->config.cik.max_tile_pipes = 16;
  3091. rdev->config.cik.max_cu_per_sh = 11;
  3092. rdev->config.cik.max_sh_per_se = 1;
  3093. rdev->config.cik.max_backends_per_se = 4;
  3094. rdev->config.cik.max_texture_channel_caches = 16;
  3095. rdev->config.cik.max_gprs = 256;
  3096. rdev->config.cik.max_gs_threads = 32;
  3097. rdev->config.cik.max_hw_contexts = 8;
  3098. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3099. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3100. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3101. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3102. gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
  3103. break;
  3104. case CHIP_KAVERI:
  3105. rdev->config.cik.max_shader_engines = 1;
  3106. rdev->config.cik.max_tile_pipes = 4;
  3107. if ((rdev->pdev->device == 0x1304) ||
  3108. (rdev->pdev->device == 0x1305) ||
  3109. (rdev->pdev->device == 0x130C) ||
  3110. (rdev->pdev->device == 0x130F) ||
  3111. (rdev->pdev->device == 0x1310) ||
  3112. (rdev->pdev->device == 0x1311) ||
  3113. (rdev->pdev->device == 0x131C)) {
  3114. rdev->config.cik.max_cu_per_sh = 8;
  3115. rdev->config.cik.max_backends_per_se = 2;
  3116. } else if ((rdev->pdev->device == 0x1309) ||
  3117. (rdev->pdev->device == 0x130A) ||
  3118. (rdev->pdev->device == 0x130D) ||
  3119. (rdev->pdev->device == 0x1313) ||
  3120. (rdev->pdev->device == 0x131D)) {
  3121. rdev->config.cik.max_cu_per_sh = 6;
  3122. rdev->config.cik.max_backends_per_se = 2;
  3123. } else if ((rdev->pdev->device == 0x1306) ||
  3124. (rdev->pdev->device == 0x1307) ||
  3125. (rdev->pdev->device == 0x130B) ||
  3126. (rdev->pdev->device == 0x130E) ||
  3127. (rdev->pdev->device == 0x1315) ||
  3128. (rdev->pdev->device == 0x131B)) {
  3129. rdev->config.cik.max_cu_per_sh = 4;
  3130. rdev->config.cik.max_backends_per_se = 1;
  3131. } else {
  3132. rdev->config.cik.max_cu_per_sh = 3;
  3133. rdev->config.cik.max_backends_per_se = 1;
  3134. }
  3135. rdev->config.cik.max_sh_per_se = 1;
  3136. rdev->config.cik.max_texture_channel_caches = 4;
  3137. rdev->config.cik.max_gprs = 256;
  3138. rdev->config.cik.max_gs_threads = 16;
  3139. rdev->config.cik.max_hw_contexts = 8;
  3140. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3141. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3142. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3143. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3144. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3145. break;
  3146. case CHIP_KABINI:
  3147. default:
  3148. rdev->config.cik.max_shader_engines = 1;
  3149. rdev->config.cik.max_tile_pipes = 2;
  3150. rdev->config.cik.max_cu_per_sh = 2;
  3151. rdev->config.cik.max_sh_per_se = 1;
  3152. rdev->config.cik.max_backends_per_se = 1;
  3153. rdev->config.cik.max_texture_channel_caches = 2;
  3154. rdev->config.cik.max_gprs = 256;
  3155. rdev->config.cik.max_gs_threads = 16;
  3156. rdev->config.cik.max_hw_contexts = 8;
  3157. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3158. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3159. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3160. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3161. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3162. break;
  3163. }
  3164. /* Initialize HDP */
  3165. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  3166. WREG32((0x2c14 + j), 0x00000000);
  3167. WREG32((0x2c18 + j), 0x00000000);
  3168. WREG32((0x2c1c + j), 0x00000000);
  3169. WREG32((0x2c20 + j), 0x00000000);
  3170. WREG32((0x2c24 + j), 0x00000000);
  3171. }
  3172. WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
  3173. WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
  3174. mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
  3175. mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
  3176. rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
  3177. rdev->config.cik.mem_max_burst_length_bytes = 256;
  3178. tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
  3179. rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
  3180. if (rdev->config.cik.mem_row_size_in_kb > 4)
  3181. rdev->config.cik.mem_row_size_in_kb = 4;
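/* The formula reads NOOFCOLS as 2^(8 + tmp) columns of 4 bytes each, so
 * tmp = 0 gives (4 * 256) / 1024 = 1KB rows and tmp = 2 gives 4KB rows;
 * anything larger is clamped to 4KB above. */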
  3182. /* XXX use MC settings? */
  3183. rdev->config.cik.shader_engine_tile_size = 32;
  3184. rdev->config.cik.num_gpus = 1;
  3185. rdev->config.cik.multi_gpu_tile_size = 64;
  3186. /* fix up row size */
  3187. gb_addr_config &= ~ROW_SIZE_MASK;
  3188. switch (rdev->config.cik.mem_row_size_in_kb) {
  3189. case 1:
  3190. default:
  3191. gb_addr_config |= ROW_SIZE(0);
  3192. break;
  3193. case 2:
  3194. gb_addr_config |= ROW_SIZE(1);
  3195. break;
  3196. case 4:
  3197. gb_addr_config |= ROW_SIZE(2);
  3198. break;
  3199. }
  3200. /* setup tiling info dword. gb_addr_config is not adequate since it does
  3201. * not have bank info, so create a custom tiling dword.
  3202. * bits 3:0 num_pipes
  3203. * bits 7:4 num_banks
  3204. * bits 11:8 group_size
  3205. * bits 15:12 row_size
  3206. */
  3207. rdev->config.cik.tile_config = 0;
  3208. switch (rdev->config.cik.num_tile_pipes) {
  3209. case 1:
  3210. rdev->config.cik.tile_config |= (0 << 0);
  3211. break;
  3212. case 2:
  3213. rdev->config.cik.tile_config |= (1 << 0);
  3214. break;
  3215. case 4:
  3216. rdev->config.cik.tile_config |= (2 << 0);
  3217. break;
  3218. case 8:
  3219. default:
  3220. /* XXX what about 12? */
  3221. rdev->config.cik.tile_config |= (3 << 0);
  3222. break;
  3223. }
  3224. rdev->config.cik.tile_config |=
  3225. ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
  3226. rdev->config.cik.tile_config |=
  3227. ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
  3228. rdev->config.cik.tile_config |=
  3229. ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
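/* Illustrative packing of the resulting dword: for a 4-pipe part (pipe code 2)
 * with bank code b, pipe-interleave code g and row-size code r this yields
 * (r << 12) | (g << 8) | (b << 4) | 2, matching the bit layout noted above. */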
  3230. WREG32(GB_ADDR_CONFIG, gb_addr_config);
  3231. WREG32(HDP_ADDR_CONFIG, gb_addr_config);
  3232. WREG32(DMIF_ADDR_CALC, gb_addr_config);
  3233. WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
  3234. WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
  3235. WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
  3236. WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
  3237. WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
  3238. cik_tiling_mode_table_init(rdev);
  3239. cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
  3240. rdev->config.cik.max_sh_per_se,
  3241. rdev->config.cik.max_backends_per_se);
  3242. /* set HW defaults for 3D engine */
  3243. WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
  3244. WREG32(SX_DEBUG_1, 0x20);
  3245. WREG32(TA_CNTL_AUX, 0x00010000);
  3246. tmp = RREG32(SPI_CONFIG_CNTL);
  3247. tmp |= 0x03000000;
  3248. WREG32(SPI_CONFIG_CNTL, tmp);
  3249. WREG32(SQ_CONFIG, 1);
  3250. WREG32(DB_DEBUG, 0);
  3251. tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
  3252. tmp |= 0x00000400;
  3253. WREG32(DB_DEBUG2, tmp);
  3254. tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
  3255. tmp |= 0x00020200;
  3256. WREG32(DB_DEBUG3, tmp);
  3257. tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
  3258. tmp |= 0x00018208;
  3259. WREG32(CB_HW_CONTROL, tmp);
  3260. WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
  3261. WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
  3262. SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
  3263. SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
  3264. SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
  3265. WREG32(VGT_NUM_INSTANCES, 1);
  3266. WREG32(CP_PERFMON_CNTL, 0);
  3267. WREG32(SQ_CONFIG, 0);
  3268. WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
  3269. FORCE_EOV_MAX_REZ_CNT(255)));
  3270. WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
  3271. AUTO_INVLD_EN(ES_AND_GS_AUTO));
  3272. WREG32(VGT_GS_VERTEX_REUSE, 16);
  3273. WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
  3274. tmp = RREG32(HDP_MISC_CNTL);
  3275. tmp |= HDP_FLUSH_INVALIDATE_CACHE;
  3276. WREG32(HDP_MISC_CNTL, tmp);
  3277. hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
  3278. WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
  3279. WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
  3280. WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
  3281. udelay(50);
  3282. }
  3283. /*
  3284. * GPU scratch registers helpers function.
  3285. */
  3286. /**
  3287. * cik_scratch_init - setup driver info for CP scratch regs
  3288. *
  3289. * @rdev: radeon_device pointer
  3290. *
  3291. * Set up the number and offset of the CP scratch registers.
3292. * NOTE: use of CP scratch registers is a legacy interface and
  3293. * is not used by default on newer asics (r6xx+). On newer asics,
  3294. * memory buffers are used for fences rather than scratch regs.
  3295. */
  3296. static void cik_scratch_init(struct radeon_device *rdev)
  3297. {
  3298. int i;
  3299. rdev->scratch.num_reg = 7;
  3300. rdev->scratch.reg_base = SCRATCH_REG0;
  3301. for (i = 0; i < rdev->scratch.num_reg; i++) {
  3302. rdev->scratch.free[i] = true;
  3303. rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
  3304. }
  3305. }
  3306. /**
  3307. * cik_ring_test - basic gfx ring test
  3308. *
  3309. * @rdev: radeon_device pointer
  3310. * @ring: radeon_ring structure holding ring information
  3311. *
  3312. * Allocate a scratch register and write to it using the gfx ring (CIK).
  3313. * Provides a basic gfx ring test to verify that the ring is working.
  3314. * Used by cik_cp_gfx_resume();
  3315. * Returns 0 on success, error on failure.
  3316. */
  3317. int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  3318. {
  3319. uint32_t scratch;
  3320. uint32_t tmp = 0;
  3321. unsigned i;
  3322. int r;
  3323. r = radeon_scratch_get(rdev, &scratch);
  3324. if (r) {
  3325. DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
  3326. return r;
  3327. }
  3328. WREG32(scratch, 0xCAFEDEAD);
  3329. r = radeon_ring_lock(rdev, ring, 3);
  3330. if (r) {
  3331. DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
  3332. radeon_scratch_free(rdev, scratch);
  3333. return r;
  3334. }
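/* emit a 3-dword SET_UCONFIG_REG packet: header, scratch register offset and
 * the 0xDEADBEEF test value the CP should write back */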
  3335. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  3336. radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
  3337. radeon_ring_write(ring, 0xDEADBEEF);
  3338. radeon_ring_unlock_commit(rdev, ring);
  3339. for (i = 0; i < rdev->usec_timeout; i++) {
  3340. tmp = RREG32(scratch);
  3341. if (tmp == 0xDEADBEEF)
  3342. break;
  3343. DRM_UDELAY(1);
  3344. }
  3345. if (i < rdev->usec_timeout) {
  3346. DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
  3347. } else {
  3348. DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
  3349. ring->idx, scratch, tmp);
  3350. r = -EINVAL;
  3351. }
  3352. radeon_scratch_free(rdev, scratch);
  3353. return r;
  3354. }
  3355. /**
  3356. * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  3357. *
  3358. * @rdev: radeon_device pointer
  3359. * @fence: radeon fence object
  3360. *
3361. * Emits a fence sequence number on the gfx ring and flushes
  3362. * GPU caches.
  3363. */
  3364. void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
  3365. struct radeon_fence *fence)
  3366. {
  3367. struct radeon_ring *ring = &rdev->ring[fence->ring];
  3368. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  3369. /* EVENT_WRITE_EOP - flush caches, send int */
  3370. radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  3371. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  3372. EOP_TC_ACTION_EN |
  3373. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  3374. EVENT_INDEX(5)));
  3375. radeon_ring_write(ring, addr & 0xfffffffc);
  3376. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
  3377. radeon_ring_write(ring, fence->seq);
  3378. radeon_ring_write(ring, 0);
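/* roughly: DATA_SEL(1) asks the CP to write the 32-bit fence value and
 * INT_SEL(2) to raise an interrupt once that write has landed */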
  3379. /* HDP flush */
  3380. /* We should be using the new WAIT_REG_MEM special op packet here
  3381. * but it causes the CP to hang
  3382. */
  3383. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3384. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  3385. WRITE_DATA_DST_SEL(0)));
  3386. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  3387. radeon_ring_write(ring, 0);
  3388. radeon_ring_write(ring, 0);
  3389. }
  3390. /**
  3391. * cik_fence_compute_ring_emit - emit a fence on the compute ring
  3392. *
  3393. * @rdev: radeon_device pointer
  3394. * @fence: radeon fence object
  3395. *
3396. * Emits a fence sequence number on the compute ring and flushes
  3397. * GPU caches.
  3398. */
  3399. void cik_fence_compute_ring_emit(struct radeon_device *rdev,
  3400. struct radeon_fence *fence)
  3401. {
  3402. struct radeon_ring *ring = &rdev->ring[fence->ring];
  3403. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  3404. /* RELEASE_MEM - flush caches, send int */
  3405. radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
  3406. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  3407. EOP_TC_ACTION_EN |
  3408. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  3409. EVENT_INDEX(5)));
  3410. radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
  3411. radeon_ring_write(ring, addr & 0xfffffffc);
  3412. radeon_ring_write(ring, upper_32_bits(addr));
  3413. radeon_ring_write(ring, fence->seq);
  3414. radeon_ring_write(ring, 0);
  3415. /* HDP flush */
  3416. /* We should be using the new WAIT_REG_MEM special op packet here
  3417. * but it causes the CP to hang
  3418. */
  3419. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3420. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  3421. WRITE_DATA_DST_SEL(0)));
  3422. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  3423. radeon_ring_write(ring, 0);
  3424. radeon_ring_write(ring, 0);
  3425. }
  3426. bool cik_semaphore_ring_emit(struct radeon_device *rdev,
  3427. struct radeon_ring *ring,
  3428. struct radeon_semaphore *semaphore,
  3429. bool emit_wait)
  3430. {
  3431. /* TODO: figure out why semaphore cause lockups */
  3432. #if 0
  3433. uint64_t addr = semaphore->gpu_addr;
  3434. unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
  3435. radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
  3436. radeon_ring_write(ring, addr & 0xffffffff);
  3437. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
  3438. return true;
  3439. #else
  3440. return false;
  3441. #endif
  3442. }
  3443. /**
  3444. * cik_copy_cpdma - copy pages using the CP DMA engine
  3445. *
  3446. * @rdev: radeon_device pointer
  3447. * @src_offset: src GPU address
  3448. * @dst_offset: dst GPU address
  3449. * @num_gpu_pages: number of GPU pages to xfer
  3450. * @fence: radeon fence object
  3451. *
3452. * Copy GPU pages using the CP DMA engine (CIK+).
  3453. * Used by the radeon ttm implementation to move pages if
  3454. * registered as the asic copy callback.
  3455. */
  3456. int cik_copy_cpdma(struct radeon_device *rdev,
  3457. uint64_t src_offset, uint64_t dst_offset,
  3458. unsigned num_gpu_pages,
  3459. struct radeon_fence **fence)
  3460. {
  3461. struct radeon_semaphore *sem = NULL;
  3462. int ring_index = rdev->asic->copy.blit_ring_index;
  3463. struct radeon_ring *ring = &rdev->ring[ring_index];
  3464. u32 size_in_bytes, cur_size_in_bytes, control;
  3465. int i, num_loops;
  3466. int r = 0;
  3467. r = radeon_semaphore_create(rdev, &sem);
  3468. if (r) {
  3469. DRM_ERROR("radeon: moving bo (%d).\n", r);
  3470. return r;
  3471. }
  3472. size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
  3473. num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
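/* DMA_DATA moves at most 0x1fffff bytes per packet, hence the chunked loop
 * below; each chunk is a 7-dword packet, which is why the ring is locked for
 * num_loops * 7 plus slack for the sync and fence packets */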
  3474. r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
  3475. if (r) {
  3476. DRM_ERROR("radeon: moving bo (%d).\n", r);
  3477. radeon_semaphore_free(rdev, &sem, NULL);
  3478. return r;
  3479. }
  3480. radeon_semaphore_sync_to(sem, *fence);
  3481. radeon_semaphore_sync_rings(rdev, sem, ring->idx);
  3482. for (i = 0; i < num_loops; i++) {
  3483. cur_size_in_bytes = size_in_bytes;
  3484. if (cur_size_in_bytes > 0x1fffff)
  3485. cur_size_in_bytes = 0x1fffff;
  3486. size_in_bytes -= cur_size_in_bytes;
  3487. control = 0;
  3488. if (size_in_bytes == 0)
  3489. control |= PACKET3_DMA_DATA_CP_SYNC;
  3490. radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
  3491. radeon_ring_write(ring, control);
  3492. radeon_ring_write(ring, lower_32_bits(src_offset));
  3493. radeon_ring_write(ring, upper_32_bits(src_offset));
  3494. radeon_ring_write(ring, lower_32_bits(dst_offset));
  3495. radeon_ring_write(ring, upper_32_bits(dst_offset));
  3496. radeon_ring_write(ring, cur_size_in_bytes);
  3497. src_offset += cur_size_in_bytes;
  3498. dst_offset += cur_size_in_bytes;
  3499. }
  3500. r = radeon_fence_emit(rdev, fence, ring->idx);
  3501. if (r) {
  3502. radeon_ring_unlock_undo(rdev, ring);
  3503. return r;
  3504. }
  3505. radeon_ring_unlock_commit(rdev, ring);
  3506. radeon_semaphore_free(rdev, &sem, *fence);
  3507. return r;
  3508. }
  3509. /*
  3510. * IB stuff
  3511. */
  3512. /**
  3513. * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
  3514. *
  3515. * @rdev: radeon_device pointer
  3516. * @ib: radeon indirect buffer object
  3517. *
3518. * Emits a DE (drawing engine) or CE (constant engine) IB
  3519. * on the gfx ring. IBs are usually generated by userspace
  3520. * acceleration drivers and submitted to the kernel for
3521. * scheduling on the ring. This function schedules the IB
  3522. * on the gfx ring for execution by the GPU.
  3523. */
  3524. void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  3525. {
  3526. struct radeon_ring *ring = &rdev->ring[ib->ring];
  3527. u32 header, control = INDIRECT_BUFFER_VALID;
  3528. if (ib->is_const_ib) {
  3529. /* set switch buffer packet before const IB */
  3530. radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3531. radeon_ring_write(ring, 0);
  3532. header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
  3533. } else {
  3534. u32 next_rptr;
  3535. if (ring->rptr_save_reg) {
  3536. next_rptr = ring->wptr + 3 + 4;
  3537. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  3538. radeon_ring_write(ring, ((ring->rptr_save_reg -
  3539. PACKET3_SET_UCONFIG_REG_START) >> 2));
  3540. radeon_ring_write(ring, next_rptr);
  3541. } else if (rdev->wb.enabled) {
  3542. next_rptr = ring->wptr + 5 + 4;
  3543. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3544. radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
  3545. radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
  3546. radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
  3547. radeon_ring_write(ring, next_rptr);
  3548. }
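/* next_rptr above accounts for the 3- or 5-dword rptr-update packet just
 * queued plus the 4-dword INDIRECT_BUFFER packet emitted below */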
  3549. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  3550. }
  3551. control |= ib->length_dw |
  3552. (ib->vm ? (ib->vm->id << 24) : 0);
  3553. radeon_ring_write(ring, header);
  3554. radeon_ring_write(ring,
  3555. #ifdef __BIG_ENDIAN
  3556. (2 << 0) |
  3557. #endif
  3558. (ib->gpu_addr & 0xFFFFFFFC));
  3559. radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  3560. radeon_ring_write(ring, control);
  3561. }
  3562. /**
  3563. * cik_ib_test - basic gfx ring IB test
  3564. *
  3565. * @rdev: radeon_device pointer
  3566. * @ring: radeon_ring structure holding ring information
  3567. *
  3568. * Allocate an IB and execute it on the gfx ring (CIK).
  3569. * Provides a basic gfx ring test to verify that IBs are working.
  3570. * Returns 0 on success, error on failure.
  3571. */
  3572. int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
  3573. {
  3574. struct radeon_ib ib;
  3575. uint32_t scratch;
  3576. uint32_t tmp = 0;
  3577. unsigned i;
  3578. int r;
  3579. r = radeon_scratch_get(rdev, &scratch);
  3580. if (r) {
  3581. DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
  3582. return r;
  3583. }
  3584. WREG32(scratch, 0xCAFEDEAD);
  3585. r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
  3586. if (r) {
  3587. DRM_ERROR("radeon: failed to get ib (%d).\n", r);
  3588. radeon_scratch_free(rdev, scratch);
  3589. return r;
  3590. }
  3591. ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
  3592. ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
  3593. ib.ptr[2] = 0xDEADBEEF;
  3594. ib.length_dw = 3;
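/* the IB body mirrors the ring test: SET_UCONFIG_REG header, scratch register
 * offset and the 0xDEADBEEF value to write */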
  3595. r = radeon_ib_schedule(rdev, &ib, NULL);
  3596. if (r) {
  3597. radeon_scratch_free(rdev, scratch);
  3598. radeon_ib_free(rdev, &ib);
  3599. DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
  3600. return r;
  3601. }
  3602. r = radeon_fence_wait(ib.fence, false);
  3603. if (r) {
  3604. DRM_ERROR("radeon: fence wait failed (%d).\n", r);
  3605. radeon_scratch_free(rdev, scratch);
  3606. radeon_ib_free(rdev, &ib);
  3607. return r;
  3608. }
  3609. for (i = 0; i < rdev->usec_timeout; i++) {
  3610. tmp = RREG32(scratch);
  3611. if (tmp == 0xDEADBEEF)
  3612. break;
  3613. DRM_UDELAY(1);
  3614. }
  3615. if (i < rdev->usec_timeout) {
  3616. DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
  3617. } else {
  3618. DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
  3619. scratch, tmp);
  3620. r = -EINVAL;
  3621. }
  3622. radeon_scratch_free(rdev, scratch);
  3623. radeon_ib_free(rdev, &ib);
  3624. return r;
  3625. }
  3626. /*
  3627. * CP.
3628. * On CIK, gfx and compute now have independent command processors.
  3629. *
  3630. * GFX
  3631. * Gfx consists of a single ring and can process both gfx jobs and
  3632. * compute jobs. The gfx CP consists of three microengines (ME):
  3633. * PFP - Pre-Fetch Parser
  3634. * ME - Micro Engine
  3635. * CE - Constant Engine
  3636. * The PFP and ME make up what is considered the Drawing Engine (DE).
3637. * The CE is an asynchronous engine used for updating buffer descriptors
  3638. * used by the DE so that they can be loaded into cache in parallel
  3639. * while the DE is processing state update packets.
  3640. *
  3641. * Compute
  3642. * The compute CP consists of two microengines (ME):
  3643. * MEC1 - Compute MicroEngine 1
  3644. * MEC2 - Compute MicroEngine 2
  3645. * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
  3646. * The queues are exposed to userspace and are programmed directly
  3647. * by the compute runtime.
  3648. */
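/* With 2 MECs, 4 pipes per MEC and 8 queues per pipe, the hardware exposes
 * 2 * 4 * 8 = 64 compute queues in total. */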
  3649. /**
  3650. * cik_cp_gfx_enable - enable/disable the gfx CP MEs
  3651. *
  3652. * @rdev: radeon_device pointer
  3653. * @enable: enable or disable the MEs
  3654. *
  3655. * Halts or unhalts the gfx MEs.
  3656. */
  3657. static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
  3658. {
  3659. if (enable)
  3660. WREG32(CP_ME_CNTL, 0);
  3661. else {
  3662. WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
  3663. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3664. }
  3665. udelay(50);
  3666. }
  3667. /**
  3668. * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
  3669. *
  3670. * @rdev: radeon_device pointer
  3671. *
  3672. * Loads the gfx PFP, ME, and CE ucode.
  3673. * Returns 0 for success, -EINVAL if the ucode is not available.
  3674. */
  3675. static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
  3676. {
  3677. const __be32 *fw_data;
  3678. int i;
  3679. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
  3680. return -EINVAL;
  3681. cik_cp_gfx_enable(rdev, false);
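/* each firmware image is streamed one big-endian dword at a time: reset the
 * ucode write address, write every word, then reset the address again */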
  3682. /* PFP */
  3683. fw_data = (const __be32 *)rdev->pfp_fw->data;
  3684. WREG32(CP_PFP_UCODE_ADDR, 0);
  3685. for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
  3686. WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
  3687. WREG32(CP_PFP_UCODE_ADDR, 0);
  3688. /* CE */
  3689. fw_data = (const __be32 *)rdev->ce_fw->data;
  3690. WREG32(CP_CE_UCODE_ADDR, 0);
  3691. for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
  3692. WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
  3693. WREG32(CP_CE_UCODE_ADDR, 0);
  3694. /* ME */
  3695. fw_data = (const __be32 *)rdev->me_fw->data;
  3696. WREG32(CP_ME_RAM_WADDR, 0);
  3697. for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
  3698. WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
  3699. WREG32(CP_ME_RAM_WADDR, 0);
  3700. WREG32(CP_PFP_UCODE_ADDR, 0);
  3701. WREG32(CP_CE_UCODE_ADDR, 0);
  3702. WREG32(CP_ME_RAM_WADDR, 0);
  3703. WREG32(CP_ME_RAM_RADDR, 0);
  3704. return 0;
  3705. }
  3706. /**
  3707. * cik_cp_gfx_start - start the gfx ring
  3708. *
  3709. * @rdev: radeon_device pointer
  3710. *
  3711. * Enables the ring and loads the clear state context and other
  3712. * packets required to init the ring.
  3713. * Returns 0 for success, error for failure.
  3714. */
  3715. static int cik_cp_gfx_start(struct radeon_device *rdev)
  3716. {
  3717. struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3718. int r, i;
  3719. /* init the CP */
  3720. WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
  3721. WREG32(CP_ENDIAN_SWAP, 0);
  3722. WREG32(CP_DEVICE_ID, 1);
  3723. cik_cp_gfx_enable(rdev, true);
  3724. r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
  3725. if (r) {
  3726. DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
  3727. return r;
  3728. }
  3729. /* init the CE partitions. CE only used for gfx on CIK */
  3730. radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
  3731. radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
  3732. radeon_ring_write(ring, 0xc000);
  3733. radeon_ring_write(ring, 0xc000);
  3734. /* setup clear context state */
  3735. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3736. radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  3737. radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  3738. radeon_ring_write(ring, 0x80000000);
  3739. radeon_ring_write(ring, 0x80000000);
  3740. for (i = 0; i < cik_default_size; i++)
  3741. radeon_ring_write(ring, cik_default_state[i]);
  3742. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3743. radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  3744. /* set clear context state */
  3745. radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
  3746. radeon_ring_write(ring, 0);
  3747. radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  3748. radeon_ring_write(ring, 0x00000316);
  3749. radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
  3750. radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
  3751. radeon_ring_unlock_commit(rdev, ring);
  3752. return 0;
  3753. }
  3754. /**
  3755. * cik_cp_gfx_fini - stop the gfx ring
  3756. *
  3757. * @rdev: radeon_device pointer
  3758. *
  3759. * Stop the gfx ring and tear down the driver ring
  3760. * info.
  3761. */
  3762. static void cik_cp_gfx_fini(struct radeon_device *rdev)
  3763. {
  3764. cik_cp_gfx_enable(rdev, false);
  3765. radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3766. }
  3767. /**
  3768. * cik_cp_gfx_resume - setup the gfx ring buffer registers
  3769. *
  3770. * @rdev: radeon_device pointer
  3771. *
  3772. * Program the location and size of the gfx ring buffer
  3773. * and test it to make sure it's working.
  3774. * Returns 0 for success, error for failure.
  3775. */
  3776. static int cik_cp_gfx_resume(struct radeon_device *rdev)
  3777. {
  3778. struct radeon_ring *ring;
  3779. u32 tmp;
  3780. u32 rb_bufsz;
  3781. u64 rb_addr;
  3782. int r;
  3783. WREG32(CP_SEM_WAIT_TIMER, 0x0);
  3784. if (rdev->family != CHIP_HAWAII)
  3785. WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
  3786. /* Set the write pointer delay */
  3787. WREG32(CP_RB_WPTR_DELAY, 0);
  3788. /* set the RB to use vmid 0 */
  3789. WREG32(CP_RB_VMID, 0);
  3790. WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
  3791. /* ring 0 - compute and gfx */
  3792. /* Set ring buffer size */
  3793. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3794. rb_bufsz = order_base_2(ring->ring_size / 8);
  3795. tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
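/* Worked example (assuming a 1 MiB ring and a 4 KiB RADEON_GPU_PAGE_SIZE):
 * rb_bufsz = order_base_2(1048576 / 8) = 17 and the rptr block size field is
 * order_base_2(4096 / 8) = 9, giving tmp = (9 << 8) | 17.
 */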
  3796. #ifdef __BIG_ENDIAN
  3797. tmp |= BUF_SWAP_32BIT;
  3798. #endif
  3799. WREG32(CP_RB0_CNTL, tmp);
  3800. /* Initialize the ring buffer's read and write pointers */
  3801. WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
  3802. ring->wptr = 0;
  3803. WREG32(CP_RB0_WPTR, ring->wptr);
3804. /* set the wb address whether it's enabled or not */
  3805. WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
  3806. WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
  3807. /* scratch register shadowing is no longer supported */
  3808. WREG32(SCRATCH_UMSK, 0);
  3809. if (!rdev->wb.enabled)
  3810. tmp |= RB_NO_UPDATE;
  3811. mdelay(1);
  3812. WREG32(CP_RB0_CNTL, tmp);
  3813. rb_addr = ring->gpu_addr >> 8;
  3814. WREG32(CP_RB0_BASE, rb_addr);
  3815. WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
  3816. ring->rptr = RREG32(CP_RB0_RPTR);
  3817. /* start the ring */
  3818. cik_cp_gfx_start(rdev);
  3819. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
  3820. r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3821. if (r) {
  3822. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3823. return r;
  3824. }
  3825. return 0;
  3826. }
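/**
 * cik_compute_ring_get_rptr - get the current read pointer of a compute ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Returns the current read pointer, taken from the writeback buffer when
 * writeback is enabled, otherwise read from CP_HQD_PQ_RPTR after selecting
 * the queue through the SRBM.
 */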
  3827. u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
  3828. struct radeon_ring *ring)
  3829. {
  3830. u32 rptr;
  3831. if (rdev->wb.enabled) {
  3832. rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
  3833. } else {
  3834. mutex_lock(&rdev->srbm_mutex);
  3835. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3836. rptr = RREG32(CP_HQD_PQ_RPTR);
  3837. cik_srbm_select(rdev, 0, 0, 0, 0);
  3838. mutex_unlock(&rdev->srbm_mutex);
  3839. }
  3840. return rptr;
  3841. }
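/**
 * cik_compute_ring_get_wptr - get the current write pointer of a compute ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Returns the current write pointer, taken from the writeback buffer when
 * writeback is enabled, otherwise read from CP_HQD_PQ_WPTR after selecting
 * the queue through the SRBM.
 */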
  3842. u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
  3843. struct radeon_ring *ring)
  3844. {
  3845. u32 wptr;
  3846. if (rdev->wb.enabled) {
  3847. wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
  3848. } else {
  3849. mutex_lock(&rdev->srbm_mutex);
  3850. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3851. wptr = RREG32(CP_HQD_PQ_WPTR);
  3852. cik_srbm_select(rdev, 0, 0, 0, 0);
  3853. mutex_unlock(&rdev->srbm_mutex);
  3854. }
  3855. return wptr;
  3856. }
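/**
 * cik_compute_ring_set_wptr - commit the write pointer of a compute ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Mirrors the new write pointer into the writeback buffer and then rings the
 * queue's doorbell so the CP fetches the newly submitted work; no per-queue
 * register access (and hence no SRBM selection) is needed on this path.
 */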
  3857. void cik_compute_ring_set_wptr(struct radeon_device *rdev,
  3858. struct radeon_ring *ring)
  3859. {
  3860. rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
  3861. WDOORBELL32(ring->doorbell_offset, ring->wptr);
  3862. }
  3863. /**
  3864. * cik_cp_compute_enable - enable/disable the compute CP MEs
  3865. *
  3866. * @rdev: radeon_device pointer
  3867. * @enable: enable or disable the MEs
  3868. *
  3869. * Halts or unhalts the compute MEs.
  3870. */
  3871. static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
  3872. {
  3873. if (enable)
  3874. WREG32(CP_MEC_CNTL, 0);
  3875. else
  3876. WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
  3877. udelay(50);
  3878. }
  3879. /**
  3880. * cik_cp_compute_load_microcode - load the compute CP ME ucode
  3881. *
  3882. * @rdev: radeon_device pointer
  3883. *
3884. * Loads the compute MEC1 ucode (and the MEC2 ucode on KV).
  3885. * Returns 0 for success, -EINVAL if the ucode is not available.
  3886. */
  3887. static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
  3888. {
  3889. const __be32 *fw_data;
  3890. int i;
  3891. if (!rdev->mec_fw)
  3892. return -EINVAL;
  3893. cik_cp_compute_enable(rdev, false);
  3894. /* MEC1 */
  3895. fw_data = (const __be32 *)rdev->mec_fw->data;
  3896. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  3897. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  3898. WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
  3899. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  3900. if (rdev->family == CHIP_KAVERI) {
  3901. /* MEC2 */
  3902. fw_data = (const __be32 *)rdev->mec_fw->data;
  3903. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  3904. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  3905. WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
  3906. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  3907. }
  3908. return 0;
  3909. }
  3910. /**
  3911. * cik_cp_compute_start - start the compute queues
  3912. *
  3913. * @rdev: radeon_device pointer
  3914. *
  3915. * Enable the compute queues.
  3916. * Returns 0 for success, error for failure.
  3917. */
  3918. static int cik_cp_compute_start(struct radeon_device *rdev)
  3919. {
  3920. cik_cp_compute_enable(rdev, true);
  3921. return 0;
  3922. }
  3923. /**
  3924. * cik_cp_compute_fini - stop the compute queues
  3925. *
  3926. * @rdev: radeon_device pointer
  3927. *
  3928. * Stop the compute queues and tear down the driver queue
  3929. * info.
  3930. */
  3931. static void cik_cp_compute_fini(struct radeon_device *rdev)
  3932. {
  3933. int i, idx, r;
  3934. cik_cp_compute_enable(rdev, false);
  3935. for (i = 0; i < 2; i++) {
  3936. if (i == 0)
  3937. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  3938. else
  3939. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  3940. if (rdev->ring[idx].mqd_obj) {
  3941. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  3942. if (unlikely(r != 0))
  3943. dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
  3944. radeon_bo_unpin(rdev->ring[idx].mqd_obj);
  3945. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  3946. radeon_bo_unref(&rdev->ring[idx].mqd_obj);
  3947. rdev->ring[idx].mqd_obj = NULL;
  3948. }
  3949. }
  3950. }
  3951. static void cik_mec_fini(struct radeon_device *rdev)
  3952. {
  3953. int r;
  3954. if (rdev->mec.hpd_eop_obj) {
  3955. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  3956. if (unlikely(r != 0))
  3957. dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
  3958. radeon_bo_unpin(rdev->mec.hpd_eop_obj);
  3959. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  3960. radeon_bo_unref(&rdev->mec.hpd_eop_obj);
  3961. rdev->mec.hpd_eop_obj = NULL;
  3962. }
  3963. }
  3964. #define MEC_HPD_SIZE 2048
  3965. static int cik_mec_init(struct radeon_device *rdev)
  3966. {
  3967. int r;
  3968. u32 *hpd;
  3969. /*
  3970. * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
  3971. * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
  3972. */
  3973. if (rdev->family == CHIP_KAVERI)
  3974. rdev->mec.num_mec = 2;
  3975. else
  3976. rdev->mec.num_mec = 1;
  3977. rdev->mec.num_pipe = 4;
  3978. rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
  3979. if (rdev->mec.hpd_eop_obj == NULL) {
  3980. r = radeon_bo_create(rdev,
3981. rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
  3982. PAGE_SIZE, true,
  3983. RADEON_GEM_DOMAIN_GTT, NULL,
  3984. &rdev->mec.hpd_eop_obj);
  3985. if (r) {
3986. dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
  3987. return r;
  3988. }
  3989. }
  3990. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  3991. if (unlikely(r != 0)) {
  3992. cik_mec_fini(rdev);
  3993. return r;
  3994. }
  3995. r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
  3996. &rdev->mec.hpd_eop_gpu_addr);
  3997. if (r) {
3998. dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
  3999. cik_mec_fini(rdev);
  4000. return r;
  4001. }
  4002. r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
  4003. if (r) {
4004. dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
  4005. cik_mec_fini(rdev);
  4006. return r;
  4007. }
  4008. /* clear memory. Not sure if this is required or not */
4009. memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
  4010. radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
  4011. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  4012. return 0;
  4013. }
  4014. struct hqd_registers
  4015. {
  4016. u32 cp_mqd_base_addr;
  4017. u32 cp_mqd_base_addr_hi;
  4018. u32 cp_hqd_active;
  4019. u32 cp_hqd_vmid;
  4020. u32 cp_hqd_persistent_state;
  4021. u32 cp_hqd_pipe_priority;
  4022. u32 cp_hqd_queue_priority;
  4023. u32 cp_hqd_quantum;
  4024. u32 cp_hqd_pq_base;
  4025. u32 cp_hqd_pq_base_hi;
  4026. u32 cp_hqd_pq_rptr;
  4027. u32 cp_hqd_pq_rptr_report_addr;
  4028. u32 cp_hqd_pq_rptr_report_addr_hi;
  4029. u32 cp_hqd_pq_wptr_poll_addr;
  4030. u32 cp_hqd_pq_wptr_poll_addr_hi;
  4031. u32 cp_hqd_pq_doorbell_control;
  4032. u32 cp_hqd_pq_wptr;
  4033. u32 cp_hqd_pq_control;
  4034. u32 cp_hqd_ib_base_addr;
  4035. u32 cp_hqd_ib_base_addr_hi;
  4036. u32 cp_hqd_ib_rptr;
  4037. u32 cp_hqd_ib_control;
  4038. u32 cp_hqd_iq_timer;
  4039. u32 cp_hqd_iq_rptr;
  4040. u32 cp_hqd_dequeue_request;
  4041. u32 cp_hqd_dma_offload;
  4042. u32 cp_hqd_sema_cmd;
  4043. u32 cp_hqd_msg_type;
  4044. u32 cp_hqd_atomic0_preop_lo;
  4045. u32 cp_hqd_atomic0_preop_hi;
  4046. u32 cp_hqd_atomic1_preop_lo;
  4047. u32 cp_hqd_atomic1_preop_hi;
  4048. u32 cp_hqd_hq_scheduler0;
  4049. u32 cp_hqd_hq_scheduler1;
  4050. u32 cp_mqd_control;
  4051. };
  4052. struct bonaire_mqd
  4053. {
  4054. u32 header;
  4055. u32 dispatch_initiator;
  4056. u32 dimensions[3];
  4057. u32 start_idx[3];
  4058. u32 num_threads[3];
  4059. u32 pipeline_stat_enable;
  4060. u32 perf_counter_enable;
  4061. u32 pgm[2];
  4062. u32 tba[2];
  4063. u32 tma[2];
  4064. u32 pgm_rsrc[2];
  4065. u32 vmid;
  4066. u32 resource_limits;
  4067. u32 static_thread_mgmt01[2];
  4068. u32 tmp_ring_size;
  4069. u32 static_thread_mgmt23[2];
  4070. u32 restart[3];
  4071. u32 thread_trace_enable;
  4072. u32 reserved1;
  4073. u32 user_data[16];
  4074. u32 vgtcs_invoke_count[2];
  4075. struct hqd_registers queue_state;
  4076. u32 dequeue_cntr;
  4077. u32 interrupt_queue[64];
  4078. };
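/*
 * Note: the bonaire_mqd above is the memory queue descriptor for a compute
 * queue; its queue_state member mirrors the CP_HQD_* registers so a hardware
 * queue descriptor can be initialized from (or saved back to) memory, which
 * is what cik_cp_compute_resume() below does for the two kernel queues.
 */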
  4079. /**
  4080. * cik_cp_compute_resume - setup the compute queue registers
  4081. *
  4082. * @rdev: radeon_device pointer
  4083. *
  4084. * Program the compute queues and test them to make sure they
  4085. * are working.
  4086. * Returns 0 for success, error for failure.
  4087. */
  4088. static int cik_cp_compute_resume(struct radeon_device *rdev)
  4089. {
  4090. int r, i, idx;
  4091. u32 tmp;
  4092. bool use_doorbell = true;
  4093. u64 hqd_gpu_addr;
  4094. u64 mqd_gpu_addr;
  4095. u64 eop_gpu_addr;
  4096. u64 wb_gpu_addr;
  4097. u32 *buf;
  4098. struct bonaire_mqd *mqd;
  4099. r = cik_cp_compute_start(rdev);
  4100. if (r)
  4101. return r;
  4102. /* fix up chicken bits */
  4103. tmp = RREG32(CP_CPF_DEBUG);
  4104. tmp |= (1 << 23);
  4105. WREG32(CP_CPF_DEBUG, tmp);
  4106. /* init the pipes */
  4107. mutex_lock(&rdev->srbm_mutex);
  4108. for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
  4109. int me = (i < 4) ? 1 : 2;
  4110. int pipe = (i < 4) ? i : (i - 4);
  4111. eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
  4112. cik_srbm_select(rdev, me, pipe, 0, 0);
  4113. /* write the EOP addr */
  4114. WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
  4115. WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
  4116. /* set the VMID assigned */
  4117. WREG32(CP_HPD_EOP_VMID, 0);
  4118. /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
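/* e.g. with MEC_HPD_SIZE = 2048 bytes (512 dwords) the field becomes
 * order_base_2(2048 / 8) = 8, and 2^(8+1) = 512 dwords as intended
 */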
  4119. tmp = RREG32(CP_HPD_EOP_CONTROL);
  4120. tmp &= ~EOP_SIZE_MASK;
  4121. tmp |= order_base_2(MEC_HPD_SIZE / 8);
  4122. WREG32(CP_HPD_EOP_CONTROL, tmp);
  4123. }
  4124. cik_srbm_select(rdev, 0, 0, 0, 0);
  4125. mutex_unlock(&rdev->srbm_mutex);
  4126. /* init the queues. Just two for now. */
  4127. for (i = 0; i < 2; i++) {
  4128. if (i == 0)
  4129. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  4130. else
  4131. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  4132. if (rdev->ring[idx].mqd_obj == NULL) {
  4133. r = radeon_bo_create(rdev,
  4134. sizeof(struct bonaire_mqd),
  4135. PAGE_SIZE, true,
  4136. RADEON_GEM_DOMAIN_GTT, NULL,
  4137. &rdev->ring[idx].mqd_obj);
  4138. if (r) {
  4139. dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
  4140. return r;
  4141. }
  4142. }
  4143. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  4144. if (unlikely(r != 0)) {
  4145. cik_cp_compute_fini(rdev);
  4146. return r;
  4147. }
  4148. r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
  4149. &mqd_gpu_addr);
  4150. if (r) {
  4151. dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
  4152. cik_cp_compute_fini(rdev);
  4153. return r;
  4154. }
  4155. r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
  4156. if (r) {
  4157. dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
  4158. cik_cp_compute_fini(rdev);
  4159. return r;
  4160. }
  4161. /* doorbell offset */
  4162. rdev->ring[idx].doorbell_offset =
  4163. (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
  4164. /* init the mqd struct */
  4165. memset(buf, 0, sizeof(struct bonaire_mqd));
  4166. mqd = (struct bonaire_mqd *)buf;
  4167. mqd->header = 0xC0310800;
  4168. mqd->static_thread_mgmt01[0] = 0xffffffff;
  4169. mqd->static_thread_mgmt01[1] = 0xffffffff;
  4170. mqd->static_thread_mgmt23[0] = 0xffffffff;
  4171. mqd->static_thread_mgmt23[1] = 0xffffffff;
  4172. mutex_lock(&rdev->srbm_mutex);
  4173. cik_srbm_select(rdev, rdev->ring[idx].me,
  4174. rdev->ring[idx].pipe,
  4175. rdev->ring[idx].queue, 0);
  4176. /* disable wptr polling */
  4177. tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
  4178. tmp &= ~WPTR_POLL_EN;
  4179. WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
  4180. /* enable doorbell? */
  4181. mqd->queue_state.cp_hqd_pq_doorbell_control =
  4182. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  4183. if (use_doorbell)
  4184. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  4185. else
  4186. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
  4187. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  4188. mqd->queue_state.cp_hqd_pq_doorbell_control);
  4189. /* disable the queue if it's active */
  4190. mqd->queue_state.cp_hqd_dequeue_request = 0;
  4191. mqd->queue_state.cp_hqd_pq_rptr = 0;
4192. mqd->queue_state.cp_hqd_pq_wptr = 0;
  4193. if (RREG32(CP_HQD_ACTIVE) & 1) {
  4194. WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
  4195. for (i = 0; i < rdev->usec_timeout; i++) {
  4196. if (!(RREG32(CP_HQD_ACTIVE) & 1))
  4197. break;
  4198. udelay(1);
  4199. }
  4200. WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
  4201. WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
  4202. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  4203. }
  4204. /* set the pointer to the MQD */
  4205. mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
  4206. mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
  4207. WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
  4208. WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
  4209. /* set MQD vmid to 0 */
  4210. mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
  4211. mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
  4212. WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
4213. /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
  4214. hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
  4215. mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
  4216. mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
  4217. WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
  4218. WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
  4219. /* set up the HQD, this is similar to CP_RB0_CNTL */
  4220. mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
  4221. mqd->queue_state.cp_hqd_pq_control &=
  4222. ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
  4223. mqd->queue_state.cp_hqd_pq_control |=
  4224. order_base_2(rdev->ring[idx].ring_size / 8);
  4225. mqd->queue_state.cp_hqd_pq_control |=
  4226. (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
  4227. #ifdef __BIG_ENDIAN
  4228. mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
  4229. #endif
  4230. mqd->queue_state.cp_hqd_pq_control &=
  4231. ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
  4232. mqd->queue_state.cp_hqd_pq_control |=
  4233. PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
  4234. WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
  4235. /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
  4236. if (i == 0)
  4237. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
  4238. else
  4239. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
  4240. mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
  4241. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
  4242. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
  4243. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
  4244. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
4245. /* set the wb address whether it's enabled or not */
  4246. if (i == 0)
  4247. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
  4248. else
  4249. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
  4250. mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
  4251. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
  4252. upper_32_bits(wb_gpu_addr) & 0xffff;
  4253. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
  4254. mqd->queue_state.cp_hqd_pq_rptr_report_addr);
  4255. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
  4256. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
  4257. /* enable the doorbell if requested */
  4258. if (use_doorbell) {
  4259. mqd->queue_state.cp_hqd_pq_doorbell_control =
  4260. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  4261. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
  4262. mqd->queue_state.cp_hqd_pq_doorbell_control |=
  4263. DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
  4264. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  4265. mqd->queue_state.cp_hqd_pq_doorbell_control &=
  4266. ~(DOORBELL_SOURCE | DOORBELL_HIT);
  4267. } else {
  4268. mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
  4269. }
  4270. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  4271. mqd->queue_state.cp_hqd_pq_doorbell_control);
  4272. /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
  4273. rdev->ring[idx].wptr = 0;
  4274. mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
  4275. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  4276. rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
  4277. mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
  4278. /* set the vmid for the queue */
  4279. mqd->queue_state.cp_hqd_vmid = 0;
  4280. WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
  4281. /* activate the queue */
  4282. mqd->queue_state.cp_hqd_active = 1;
  4283. WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
  4284. cik_srbm_select(rdev, 0, 0, 0, 0);
  4285. mutex_unlock(&rdev->srbm_mutex);
  4286. radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
  4287. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  4288. rdev->ring[idx].ready = true;
  4289. r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
  4290. if (r)
  4291. rdev->ring[idx].ready = false;
  4292. }
  4293. return 0;
  4294. }
  4295. static void cik_cp_enable(struct radeon_device *rdev, bool enable)
  4296. {
  4297. cik_cp_gfx_enable(rdev, enable);
  4298. cik_cp_compute_enable(rdev, enable);
  4299. }
  4300. static int cik_cp_load_microcode(struct radeon_device *rdev)
  4301. {
  4302. int r;
  4303. r = cik_cp_gfx_load_microcode(rdev);
  4304. if (r)
  4305. return r;
  4306. r = cik_cp_compute_load_microcode(rdev);
  4307. if (r)
  4308. return r;
  4309. return 0;
  4310. }
  4311. static void cik_cp_fini(struct radeon_device *rdev)
  4312. {
  4313. cik_cp_gfx_fini(rdev);
  4314. cik_cp_compute_fini(rdev);
  4315. }
  4316. static int cik_cp_resume(struct radeon_device *rdev)
  4317. {
  4318. int r;
  4319. cik_enable_gui_idle_interrupt(rdev, false);
  4320. r = cik_cp_load_microcode(rdev);
  4321. if (r)
  4322. return r;
  4323. r = cik_cp_gfx_resume(rdev);
  4324. if (r)
  4325. return r;
  4326. r = cik_cp_compute_resume(rdev);
  4327. if (r)
  4328. return r;
  4329. cik_enable_gui_idle_interrupt(rdev, true);
  4330. return 0;
  4331. }
  4332. static void cik_print_gpu_status_regs(struct radeon_device *rdev)
  4333. {
  4334. dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
  4335. RREG32(GRBM_STATUS));
  4336. dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
  4337. RREG32(GRBM_STATUS2));
  4338. dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
  4339. RREG32(GRBM_STATUS_SE0));
  4340. dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
  4341. RREG32(GRBM_STATUS_SE1));
  4342. dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
  4343. RREG32(GRBM_STATUS_SE2));
  4344. dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
  4345. RREG32(GRBM_STATUS_SE3));
  4346. dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
  4347. RREG32(SRBM_STATUS));
  4348. dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
  4349. RREG32(SRBM_STATUS2));
  4350. dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
  4351. RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
  4352. dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
  4353. RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
  4354. dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
  4355. dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
  4356. RREG32(CP_STALLED_STAT1));
  4357. dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
  4358. RREG32(CP_STALLED_STAT2));
  4359. dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
  4360. RREG32(CP_STALLED_STAT3));
  4361. dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
  4362. RREG32(CP_CPF_BUSY_STAT));
  4363. dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
  4364. RREG32(CP_CPF_STALLED_STAT1));
  4365. dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
  4366. dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
  4367. dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
  4368. RREG32(CP_CPC_STALLED_STAT1));
  4369. dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
  4370. }
  4371. /**
  4372. * cik_gpu_check_soft_reset - check which blocks are busy
  4373. *
  4374. * @rdev: radeon_device pointer
  4375. *
  4376. * Check which blocks are busy and return the relevant reset
  4377. * mask to be used by cik_gpu_soft_reset().
  4378. * Returns a mask of the blocks to be reset.
  4379. */
  4380. u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
  4381. {
  4382. u32 reset_mask = 0;
  4383. u32 tmp;
  4384. /* GRBM_STATUS */
  4385. tmp = RREG32(GRBM_STATUS);
  4386. if (tmp & (PA_BUSY | SC_BUSY |
  4387. BCI_BUSY | SX_BUSY |
  4388. TA_BUSY | VGT_BUSY |
  4389. DB_BUSY | CB_BUSY |
  4390. GDS_BUSY | SPI_BUSY |
  4391. IA_BUSY | IA_BUSY_NO_DMA))
  4392. reset_mask |= RADEON_RESET_GFX;
  4393. if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
  4394. reset_mask |= RADEON_RESET_CP;
  4395. /* GRBM_STATUS2 */
  4396. tmp = RREG32(GRBM_STATUS2);
  4397. if (tmp & RLC_BUSY)
  4398. reset_mask |= RADEON_RESET_RLC;
  4399. /* SDMA0_STATUS_REG */
  4400. tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
  4401. if (!(tmp & SDMA_IDLE))
  4402. reset_mask |= RADEON_RESET_DMA;
  4403. /* SDMA1_STATUS_REG */
  4404. tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
  4405. if (!(tmp & SDMA_IDLE))
  4406. reset_mask |= RADEON_RESET_DMA1;
  4407. /* SRBM_STATUS2 */
  4408. tmp = RREG32(SRBM_STATUS2);
  4409. if (tmp & SDMA_BUSY)
  4410. reset_mask |= RADEON_RESET_DMA;
  4411. if (tmp & SDMA1_BUSY)
  4412. reset_mask |= RADEON_RESET_DMA1;
  4413. /* SRBM_STATUS */
  4414. tmp = RREG32(SRBM_STATUS);
  4415. if (tmp & IH_BUSY)
  4416. reset_mask |= RADEON_RESET_IH;
  4417. if (tmp & SEM_BUSY)
  4418. reset_mask |= RADEON_RESET_SEM;
  4419. if (tmp & GRBM_RQ_PENDING)
  4420. reset_mask |= RADEON_RESET_GRBM;
  4421. if (tmp & VMC_BUSY)
  4422. reset_mask |= RADEON_RESET_VMC;
  4423. if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
  4424. MCC_BUSY | MCD_BUSY))
  4425. reset_mask |= RADEON_RESET_MC;
  4426. if (evergreen_is_display_hung(rdev))
  4427. reset_mask |= RADEON_RESET_DISPLAY;
4428. /* Skip MC reset as it's most likely not hung, just busy */
  4429. if (reset_mask & RADEON_RESET_MC) {
  4430. DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
  4431. reset_mask &= ~RADEON_RESET_MC;
  4432. }
  4433. return reset_mask;
  4434. }
  4435. /**
  4436. * cik_gpu_soft_reset - soft reset GPU
  4437. *
  4438. * @rdev: radeon_device pointer
  4439. * @reset_mask: mask of which blocks to reset
  4440. *
  4441. * Soft reset the blocks specified in @reset_mask.
  4442. */
  4443. static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
  4444. {
  4445. struct evergreen_mc_save save;
  4446. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4447. u32 tmp;
  4448. if (reset_mask == 0)
  4449. return;
  4450. dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
  4451. cik_print_gpu_status_regs(rdev);
  4452. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  4453. RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
  4454. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  4455. RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
  4456. /* disable CG/PG */
  4457. cik_fini_pg(rdev);
  4458. cik_fini_cg(rdev);
  4459. /* stop the rlc */
  4460. cik_rlc_stop(rdev);
  4461. /* Disable GFX parsing/prefetching */
  4462. WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
  4463. /* Disable MEC parsing/prefetching */
  4464. WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
  4465. if (reset_mask & RADEON_RESET_DMA) {
  4466. /* sdma0 */
  4467. tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
  4468. tmp |= SDMA_HALT;
  4469. WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  4470. }
  4471. if (reset_mask & RADEON_RESET_DMA1) {
  4472. /* sdma1 */
  4473. tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
  4474. tmp |= SDMA_HALT;
  4475. WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  4476. }
  4477. evergreen_mc_stop(rdev, &save);
  4478. if (evergreen_mc_wait_for_idle(rdev)) {
4479. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4480. }
  4481. if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
  4482. grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
  4483. if (reset_mask & RADEON_RESET_CP) {
  4484. grbm_soft_reset |= SOFT_RESET_CP;
  4485. srbm_soft_reset |= SOFT_RESET_GRBM;
  4486. }
  4487. if (reset_mask & RADEON_RESET_DMA)
  4488. srbm_soft_reset |= SOFT_RESET_SDMA;
  4489. if (reset_mask & RADEON_RESET_DMA1)
  4490. srbm_soft_reset |= SOFT_RESET_SDMA1;
  4491. if (reset_mask & RADEON_RESET_DISPLAY)
  4492. srbm_soft_reset |= SOFT_RESET_DC;
  4493. if (reset_mask & RADEON_RESET_RLC)
  4494. grbm_soft_reset |= SOFT_RESET_RLC;
  4495. if (reset_mask & RADEON_RESET_SEM)
  4496. srbm_soft_reset |= SOFT_RESET_SEM;
  4497. if (reset_mask & RADEON_RESET_IH)
  4498. srbm_soft_reset |= SOFT_RESET_IH;
  4499. if (reset_mask & RADEON_RESET_GRBM)
  4500. srbm_soft_reset |= SOFT_RESET_GRBM;
  4501. if (reset_mask & RADEON_RESET_VMC)
  4502. srbm_soft_reset |= SOFT_RESET_VMC;
  4503. if (!(rdev->flags & RADEON_IS_IGP)) {
  4504. if (reset_mask & RADEON_RESET_MC)
  4505. srbm_soft_reset |= SOFT_RESET_MC;
  4506. }
  4507. if (grbm_soft_reset) {
  4508. tmp = RREG32(GRBM_SOFT_RESET);
  4509. tmp |= grbm_soft_reset;
  4510. dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
  4511. WREG32(GRBM_SOFT_RESET, tmp);
  4512. tmp = RREG32(GRBM_SOFT_RESET);
  4513. udelay(50);
  4514. tmp &= ~grbm_soft_reset;
  4515. WREG32(GRBM_SOFT_RESET, tmp);
  4516. tmp = RREG32(GRBM_SOFT_RESET);
  4517. }
  4518. if (srbm_soft_reset) {
  4519. tmp = RREG32(SRBM_SOFT_RESET);
  4520. tmp |= srbm_soft_reset;
  4521. dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
  4522. WREG32(SRBM_SOFT_RESET, tmp);
  4523. tmp = RREG32(SRBM_SOFT_RESET);
  4524. udelay(50);
  4525. tmp &= ~srbm_soft_reset;
  4526. WREG32(SRBM_SOFT_RESET, tmp);
  4527. tmp = RREG32(SRBM_SOFT_RESET);
  4528. }
  4529. /* Wait a little for things to settle down */
  4530. udelay(50);
  4531. evergreen_mc_resume(rdev, &save);
  4532. udelay(50);
  4533. cik_print_gpu_status_regs(rdev);
  4534. }
  4535. /**
  4536. * cik_asic_reset - soft reset GPU
  4537. *
  4538. * @rdev: radeon_device pointer
  4539. *
  4540. * Look up which blocks are hung and attempt
  4541. * to reset them.
  4542. * Returns 0 for success.
  4543. */
  4544. int cik_asic_reset(struct radeon_device *rdev)
  4545. {
  4546. u32 reset_mask;
  4547. reset_mask = cik_gpu_check_soft_reset(rdev);
  4548. if (reset_mask)
  4549. r600_set_bios_scratch_engine_hung(rdev, true);
  4550. cik_gpu_soft_reset(rdev, reset_mask);
  4551. reset_mask = cik_gpu_check_soft_reset(rdev);
  4552. if (!reset_mask)
  4553. r600_set_bios_scratch_engine_hung(rdev, false);
  4554. return 0;
  4555. }
  4556. /**
  4557. * cik_gfx_is_lockup - check if the 3D engine is locked up
  4558. *
  4559. * @rdev: radeon_device pointer
  4560. * @ring: radeon_ring structure holding ring information
  4561. *
  4562. * Check if the 3D engine is locked up (CIK).
  4563. * Returns true if the engine is locked, false if not.
  4564. */
  4565. bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  4566. {
  4567. u32 reset_mask = cik_gpu_check_soft_reset(rdev);
  4568. if (!(reset_mask & (RADEON_RESET_GFX |
  4569. RADEON_RESET_COMPUTE |
  4570. RADEON_RESET_CP))) {
  4571. radeon_ring_lockup_update(ring);
  4572. return false;
  4573. }
  4574. /* force CP activities */
  4575. radeon_ring_force_activity(rdev, ring);
  4576. return radeon_ring_test_lockup(rdev, ring);
  4577. }
  4578. /* MC */
  4579. /**
  4580. * cik_mc_program - program the GPU memory controller
  4581. *
  4582. * @rdev: radeon_device pointer
  4583. *
  4584. * Set the location of vram, gart, and AGP in the GPU's
  4585. * physical address space (CIK).
  4586. */
  4587. static void cik_mc_program(struct radeon_device *rdev)
  4588. {
  4589. struct evergreen_mc_save save;
  4590. u32 tmp;
  4591. int i, j;
  4592. /* Initialize HDP */
  4593. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  4594. WREG32((0x2c14 + j), 0x00000000);
  4595. WREG32((0x2c18 + j), 0x00000000);
  4596. WREG32((0x2c1c + j), 0x00000000);
  4597. WREG32((0x2c20 + j), 0x00000000);
  4598. WREG32((0x2c24 + j), 0x00000000);
  4599. }
  4600. WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
  4601. evergreen_mc_stop(rdev, &save);
  4602. if (radeon_mc_wait_for_idle(rdev)) {
4603. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4604. }
4605. /* Lock out access through the VGA aperture */
  4606. WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
  4607. /* Update configuration */
  4608. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
  4609. rdev->mc.vram_start >> 12);
  4610. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  4611. rdev->mc.vram_end >> 12);
  4612. WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
  4613. rdev->vram_scratch.gpu_addr >> 12);
  4614. tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
  4615. tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
  4616. WREG32(MC_VM_FB_LOCATION, tmp);
  4617. /* XXX double check these! */
  4618. WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
  4619. WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
  4620. WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
  4621. WREG32(MC_VM_AGP_BASE, 0);
  4622. WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
  4623. WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
  4624. if (radeon_mc_wait_for_idle(rdev)) {
4625. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4626. }
  4627. evergreen_mc_resume(rdev, &save);
  4628. /* we need to own VRAM, so turn off the VGA renderer here
  4629. * to stop it overwriting our objects */
  4630. rv515_vga_render_disable(rdev);
  4631. }
  4632. /**
  4633. * cik_mc_init - initialize the memory controller driver params
  4634. *
  4635. * @rdev: radeon_device pointer
  4636. *
  4637. * Look up the amount of vram, vram width, and decide how to place
  4638. * vram and gart within the GPU's physical address space (CIK).
  4639. * Returns 0 for success.
  4640. */
  4641. static int cik_mc_init(struct radeon_device *rdev)
  4642. {
  4643. u32 tmp;
  4644. int chansize, numchan;
4645. /* Get VRAM information */
  4646. rdev->mc.vram_is_ddr = true;
  4647. tmp = RREG32(MC_ARB_RAMCFG);
  4648. if (tmp & CHANSIZE_MASK) {
  4649. chansize = 64;
  4650. } else {
  4651. chansize = 32;
  4652. }
  4653. tmp = RREG32(MC_SHARED_CHMAP);
  4654. switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
  4655. case 0:
  4656. default:
  4657. numchan = 1;
  4658. break;
  4659. case 1:
  4660. numchan = 2;
  4661. break;
  4662. case 2:
  4663. numchan = 4;
  4664. break;
  4665. case 3:
  4666. numchan = 8;
  4667. break;
  4668. case 4:
  4669. numchan = 3;
  4670. break;
  4671. case 5:
  4672. numchan = 6;
  4673. break;
  4674. case 6:
  4675. numchan = 10;
  4676. break;
  4677. case 7:
  4678. numchan = 12;
  4679. break;
  4680. case 8:
  4681. numchan = 16;
  4682. break;
  4683. }
  4684. rdev->mc.vram_width = numchan * chansize;
4685. /* Could aper size report 0? */
  4686. rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
  4687. rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4688. /* size in MB on CIK */
  4689. rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
  4690. rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
  4691. rdev->mc.visible_vram_size = rdev->mc.aper_size;
  4692. si_vram_gtt_location(rdev, &rdev->mc);
  4693. radeon_update_bandwidth_info(rdev);
  4694. return 0;
  4695. }
  4696. /*
  4697. * GART
  4698. * VMID 0 is the physical GPU addresses as used by the kernel.
  4699. * VMIDs 1-15 are used for userspace clients and are handled
  4700. * by the radeon vm/hsa code.
  4701. */
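/*
 * For reference (see cik_pcie_gart_enable() and cik_vm_flush() below): the
 * per-VMID page directory base registers are split across two banks,
 * VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vmid << 2) for VMIDs 0-7 and
 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vmid - 8) << 2) for VMIDs 8-15.
 */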
  4702. /**
  4703. * cik_pcie_gart_tlb_flush - gart tlb flush callback
  4704. *
  4705. * @rdev: radeon_device pointer
  4706. *
  4707. * Flush the TLB for the VMID 0 page table (CIK).
  4708. */
  4709. void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
  4710. {
  4711. /* flush hdp cache */
  4712. WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
  4713. /* bits 0-15 are the VM contexts0-15 */
  4714. WREG32(VM_INVALIDATE_REQUEST, 0x1);
  4715. }
  4716. /**
  4717. * cik_pcie_gart_enable - gart enable
  4718. *
  4719. * @rdev: radeon_device pointer
  4720. *
  4721. * This sets up the TLBs, programs the page tables for VMID0,
  4722. * sets up the hw for VMIDs 1-15 which are allocated on
  4723. * demand, and sets up the global locations for the LDS, GDS,
  4724. * and GPUVM for FSA64 clients (CIK).
  4725. * Returns 0 for success, errors for failure.
  4726. */
  4727. static int cik_pcie_gart_enable(struct radeon_device *rdev)
  4728. {
  4729. int r, i;
  4730. if (rdev->gart.robj == NULL) {
  4731. dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
  4732. return -EINVAL;
  4733. }
  4734. r = radeon_gart_table_vram_pin(rdev);
  4735. if (r)
  4736. return r;
  4737. radeon_gart_restore(rdev);
  4738. /* Setup TLB control */
  4739. WREG32(MC_VM_MX_L1_TLB_CNTL,
  4740. (0xA << 7) |
  4741. ENABLE_L1_TLB |
  4742. SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  4743. ENABLE_ADVANCED_DRIVER_MODEL |
  4744. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  4745. /* Setup L2 cache */
  4746. WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
  4747. ENABLE_L2_FRAGMENT_PROCESSING |
  4748. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  4749. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  4750. EFFECTIVE_L2_QUEUE_SIZE(7) |
  4751. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  4752. WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
  4753. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  4754. L2_CACHE_BIGK_FRAGMENT_SIZE(6));
  4755. /* setup context0 */
  4756. WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
  4757. WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
  4758. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
  4759. WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
  4760. (u32)(rdev->dummy_page.addr >> 12));
  4761. WREG32(VM_CONTEXT0_CNTL2, 0);
  4762. WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
  4763. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
  4764. WREG32(0x15D4, 0);
  4765. WREG32(0x15D8, 0);
  4766. WREG32(0x15DC, 0);
  4767. /* empty context1-15 */
4768. /* FIXME: start with 4GB; once a 2-level page table is in use, switch to
4769. * the full vm size space
  4770. */
  4771. /* set vm size, must be a multiple of 4 */
  4772. WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
  4773. WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
  4774. for (i = 1; i < 16; i++) {
  4775. if (i < 8)
  4776. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
  4777. rdev->gart.table_addr >> 12);
  4778. else
  4779. WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
  4780. rdev->gart.table_addr >> 12);
  4781. }
  4782. /* enable context1-15 */
  4783. WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
  4784. (u32)(rdev->dummy_page.addr >> 12));
  4785. WREG32(VM_CONTEXT1_CNTL2, 4);
  4786. WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
  4787. RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4788. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  4789. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4790. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  4791. PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4792. PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
  4793. VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4794. VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
  4795. READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4796. READ_PROTECTION_FAULT_ENABLE_DEFAULT |
  4797. WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  4798. WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
  4799. /* TC cache setup ??? */
  4800. WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
  4801. WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
  4802. WREG32(TC_CFG_L1_STORE_POLICY, 0);
  4803. WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
  4804. WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
  4805. WREG32(TC_CFG_L2_STORE_POLICY0, 0);
  4806. WREG32(TC_CFG_L2_STORE_POLICY1, 0);
  4807. WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
  4808. WREG32(TC_CFG_L1_VOLATILE, 0);
  4809. WREG32(TC_CFG_L2_VOLATILE, 0);
  4810. if (rdev->family == CHIP_KAVERI) {
  4811. u32 tmp = RREG32(CHUB_CONTROL);
  4812. tmp &= ~BYPASS_VM;
  4813. WREG32(CHUB_CONTROL, tmp);
  4814. }
  4815. /* XXX SH_MEM regs */
  4816. /* where to put LDS, scratch, GPUVM in FSA64 space */
  4817. mutex_lock(&rdev->srbm_mutex);
  4818. for (i = 0; i < 16; i++) {
  4819. cik_srbm_select(rdev, 0, 0, 0, i);
  4820. /* CP and shaders */
  4821. WREG32(SH_MEM_CONFIG, 0);
  4822. WREG32(SH_MEM_APE1_BASE, 1);
  4823. WREG32(SH_MEM_APE1_LIMIT, 0);
  4824. WREG32(SH_MEM_BASES, 0);
  4825. /* SDMA GFX */
  4826. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
  4827. WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
  4828. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
  4829. WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
  4830. /* XXX SDMA RLC - todo */
  4831. }
  4832. cik_srbm_select(rdev, 0, 0, 0, 0);
  4833. mutex_unlock(&rdev->srbm_mutex);
  4834. cik_pcie_gart_tlb_flush(rdev);
  4835. DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
  4836. (unsigned)(rdev->mc.gtt_size >> 20),
  4837. (unsigned long long)rdev->gart.table_addr);
  4838. rdev->gart.ready = true;
  4839. return 0;
  4840. }
  4841. /**
  4842. * cik_pcie_gart_disable - gart disable
  4843. *
  4844. * @rdev: radeon_device pointer
  4845. *
4846. * This disables all VM page tables (CIK).
  4847. */
  4848. static void cik_pcie_gart_disable(struct radeon_device *rdev)
  4849. {
  4850. /* Disable all tables */
  4851. WREG32(VM_CONTEXT0_CNTL, 0);
  4852. WREG32(VM_CONTEXT1_CNTL, 0);
  4853. /* Setup TLB control */
  4854. WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  4855. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  4856. /* Setup L2 cache */
  4857. WREG32(VM_L2_CNTL,
  4858. ENABLE_L2_FRAGMENT_PROCESSING |
  4859. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  4860. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  4861. EFFECTIVE_L2_QUEUE_SIZE(7) |
  4862. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  4863. WREG32(VM_L2_CNTL2, 0);
  4864. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  4865. L2_CACHE_BIGK_FRAGMENT_SIZE(6));
  4866. radeon_gart_table_vram_unpin(rdev);
  4867. }
  4868. /**
  4869. * cik_pcie_gart_fini - vm fini callback
  4870. *
  4871. * @rdev: radeon_device pointer
  4872. *
  4873. * Tears down the driver GART/VM setup (CIK).
  4874. */
  4875. static void cik_pcie_gart_fini(struct radeon_device *rdev)
  4876. {
  4877. cik_pcie_gart_disable(rdev);
  4878. radeon_gart_table_vram_free(rdev);
  4879. radeon_gart_fini(rdev);
  4880. }
  4881. /* vm parser */
  4882. /**
  4883. * cik_ib_parse - vm ib_parse callback
  4884. *
  4885. * @rdev: radeon_device pointer
  4886. * @ib: indirect buffer pointer
  4887. *
  4888. * CIK uses hw IB checking so this is a nop (CIK).
  4889. */
  4890. int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
  4891. {
  4892. return 0;
  4893. }
  4894. /*
  4895. * vm
  4896. * VMID 0 is the physical GPU addresses as used by the kernel.
  4897. * VMIDs 1-15 are used for userspace clients and are handled
  4898. * by the radeon vm/hsa code.
  4899. */
  4900. /**
  4901. * cik_vm_init - cik vm init callback
  4902. *
  4903. * @rdev: radeon_device pointer
  4904. *
  4905. * Inits cik specific vm parameters (number of VMs, base of vram for
  4906. * VMIDs 1-15) (CIK).
  4907. * Returns 0 for success.
  4908. */
  4909. int cik_vm_init(struct radeon_device *rdev)
  4910. {
  4911. /* number of VMs */
  4912. rdev->vm_manager.nvm = 16;
  4913. /* base offset of vram pages */
  4914. if (rdev->flags & RADEON_IS_IGP) {
  4915. u64 tmp = RREG32(MC_VM_FB_OFFSET);
  4916. tmp <<= 22;
  4917. rdev->vm_manager.vram_base_offset = tmp;
  4918. } else
  4919. rdev->vm_manager.vram_base_offset = 0;
  4920. return 0;
  4921. }
  4922. /**
  4923. * cik_vm_fini - cik vm fini callback
  4924. *
  4925. * @rdev: radeon_device pointer
  4926. *
  4927. * Tear down any asic specific VM setup (CIK).
  4928. */
  4929. void cik_vm_fini(struct radeon_device *rdev)
  4930. {
  4931. }
  4932. /**
  4933. * cik_vm_decode_fault - print human readable fault info
  4934. *
  4935. * @rdev: radeon_device pointer
  4936. * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4937. * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  4938. *
  4939. * Print human readable fault information (CIK).
  4940. */
  4941. static void cik_vm_decode_fault(struct radeon_device *rdev,
  4942. u32 status, u32 addr, u32 mc_client)
  4943. {
  4944. u32 mc_id;
  4945. u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
  4946. u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
  4947. char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
  4948. (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
  4949. if (rdev->family == CHIP_HAWAII)
  4950. mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
  4951. else
  4952. mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
  4953. printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
  4954. protections, vmid, addr,
  4955. (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
  4956. block, mc_client, mc_id);
  4957. }
  4958. /**
  4959. * cik_vm_flush - cik vm flush using the CP
  4960. *
4961. * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: radeon_vm pointer
  4962. *
  4963. * Update the page table base and flush the VM TLB
  4964. * using the CP (CIK).
  4965. */
  4966. void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
  4967. {
  4968. struct radeon_ring *ring = &rdev->ring[ridx];
  4969. if (vm == NULL)
  4970. return;
  4971. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4972. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4973. WRITE_DATA_DST_SEL(0)));
  4974. if (vm->id < 8) {
  4975. radeon_ring_write(ring,
  4976. (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
  4977. } else {
  4978. radeon_ring_write(ring,
  4979. (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
  4980. }
  4981. radeon_ring_write(ring, 0);
  4982. radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
  4983. /* update SH_MEM_* regs */
  4984. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4985. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4986. WRITE_DATA_DST_SEL(0)));
  4987. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  4988. radeon_ring_write(ring, 0);
  4989. radeon_ring_write(ring, VMID(vm->id));
  4990. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
  4991. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4992. WRITE_DATA_DST_SEL(0)));
  4993. radeon_ring_write(ring, SH_MEM_BASES >> 2);
  4994. radeon_ring_write(ring, 0);
  4995. radeon_ring_write(ring, 0); /* SH_MEM_BASES */
  4996. radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
  4997. radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
  4998. radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
  4999. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5000. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  5001. WRITE_DATA_DST_SEL(0)));
  5002. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  5003. radeon_ring_write(ring, 0);
  5004. radeon_ring_write(ring, VMID(0));
  5005. /* HDP flush */
  5006. /* We should be using the WAIT_REG_MEM packet here like in
  5007. * cik_fence_ring_emit(), but it causes the CP to hang in this
  5008. * context...
  5009. */
  5010. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5011. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  5012. WRITE_DATA_DST_SEL(0)));
  5013. radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
  5014. radeon_ring_write(ring, 0);
  5015. radeon_ring_write(ring, 0);
  5016. /* bits 0-15 are the VM contexts0-15 */
  5017. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5018. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  5019. WRITE_DATA_DST_SEL(0)));
  5020. radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
  5021. radeon_ring_write(ring, 0);
  5022. radeon_ring_write(ring, 1 << vm->id);
  5023. /* compute doesn't have PFP */
  5024. if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
  5025. /* sync PFP to ME, otherwise we might get invalid PFP reads */
  5026. radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  5027. radeon_ring_write(ring, 0x0);
  5028. }
  5029. }
  5030. /*
  5031. * RLC
  5032. * The RLC is a multi-purpose microengine that handles a
  5033. * variety of functions, the most important of which is
  5034. * the interrupt controller.
  5035. */
  5036. static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
  5037. bool enable)
  5038. {
  5039. u32 tmp = RREG32(CP_INT_CNTL_RING0);
  5040. if (enable)
  5041. tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5042. else
  5043. tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5044. WREG32(CP_INT_CNTL_RING0, tmp);
  5045. }
  5046. static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
  5047. {
  5048. u32 tmp;
  5049. tmp = RREG32(RLC_LB_CNTL);
  5050. if (enable)
  5051. tmp |= LOAD_BALANCE_ENABLE;
  5052. else
  5053. tmp &= ~LOAD_BALANCE_ENABLE;
  5054. WREG32(RLC_LB_CNTL, tmp);
  5055. }
  5056. static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
  5057. {
  5058. u32 i, j, k;
  5059. u32 mask;
  5060. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  5061. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  5062. cik_select_se_sh(rdev, i, j);
  5063. for (k = 0; k < rdev->usec_timeout; k++) {
  5064. if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
  5065. break;
  5066. udelay(1);
  5067. }
  5068. }
  5069. }
  5070. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5071. mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
  5072. for (k = 0; k < rdev->usec_timeout; k++) {
  5073. if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
  5074. break;
  5075. udelay(1);
  5076. }
  5077. }
  5078. static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
  5079. {
  5080. u32 tmp;
  5081. tmp = RREG32(RLC_CNTL);
  5082. if (tmp != rlc)
  5083. WREG32(RLC_CNTL, rlc);
  5084. }
  5085. static u32 cik_halt_rlc(struct radeon_device *rdev)
  5086. {
  5087. u32 data, orig;
  5088. orig = data = RREG32(RLC_CNTL);
  5089. if (data & RLC_ENABLE) {
  5090. u32 i;
  5091. data &= ~RLC_ENABLE;
  5092. WREG32(RLC_CNTL, data);
  5093. for (i = 0; i < rdev->usec_timeout; i++) {
  5094. if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
  5095. break;
  5096. udelay(1);
  5097. }
  5098. cik_wait_for_rlc_serdes(rdev);
  5099. }
  5100. return orig;
  5101. }
  5102. void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
  5103. {
  5104. u32 tmp, i, mask;
  5105. tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
  5106. WREG32(RLC_GPR_REG2, tmp);
  5107. mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
  5108. for (i = 0; i < rdev->usec_timeout; i++) {
  5109. if ((RREG32(RLC_GPM_STAT) & mask) == mask)
  5110. break;
  5111. udelay(1);
  5112. }
  5113. for (i = 0; i < rdev->usec_timeout; i++) {
  5114. if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
  5115. break;
  5116. udelay(1);
  5117. }
  5118. }
  5119. void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
  5120. {
  5121. u32 tmp;
  5122. tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
  5123. WREG32(RLC_GPR_REG2, tmp);
  5124. }
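/*
 * The two helpers above implement a request/acknowledge handshake with the
 * RLC firmware through RLC_GPR_REG2: the driver posts REQ plus an enter or
 * exit message and, on entry, waits for RLC_GPM_STAT to report the GFX
 * clock/power status and for the RLC to clear REQ before any clock- or
 * power-gated registers are touched.
 */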
  5125. /**
  5126. * cik_rlc_stop - stop the RLC ME
  5127. *
  5128. * @rdev: radeon_device pointer
  5129. *
  5130. * Halt the RLC ME (MicroEngine) (CIK).
  5131. */
  5132. static void cik_rlc_stop(struct radeon_device *rdev)
  5133. {
  5134. WREG32(RLC_CNTL, 0);
  5135. cik_enable_gui_idle_interrupt(rdev, false);
  5136. cik_wait_for_rlc_serdes(rdev);
  5137. }
  5138. /**
  5139. * cik_rlc_start - start the RLC ME
  5140. *
  5141. * @rdev: radeon_device pointer
  5142. *
  5143. * Unhalt the RLC ME (MicroEngine) (CIK).
  5144. */
  5145. static void cik_rlc_start(struct radeon_device *rdev)
  5146. {
  5147. WREG32(RLC_CNTL, RLC_ENABLE);
  5148. cik_enable_gui_idle_interrupt(rdev, true);
  5149. udelay(50);
  5150. }
  5151. /**
  5152. * cik_rlc_resume - setup the RLC hw
  5153. *
  5154. * @rdev: radeon_device pointer
  5155. *
  5156. * Initialize the RLC registers, load the ucode,
  5157. * and start the RLC (CIK).
  5158. * Returns 0 for success, -EINVAL if the ucode is not available.
  5159. */
  5160. static int cik_rlc_resume(struct radeon_device *rdev)
  5161. {
  5162. u32 i, size, tmp;
  5163. const __be32 *fw_data;
  5164. if (!rdev->rlc_fw)
  5165. return -EINVAL;
  5166. switch (rdev->family) {
  5167. case CHIP_BONAIRE:
  5168. case CHIP_HAWAII:
  5169. default:
  5170. size = BONAIRE_RLC_UCODE_SIZE;
  5171. break;
  5172. case CHIP_KAVERI:
  5173. size = KV_RLC_UCODE_SIZE;
  5174. break;
  5175. case CHIP_KABINI:
  5176. size = KB_RLC_UCODE_SIZE;
  5177. break;
  5178. }
  5179. cik_rlc_stop(rdev);
  5180. /* disable CG */
  5181. tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
  5182. WREG32(RLC_CGCG_CGLS_CTRL, tmp);
  5183. si_rlc_reset(rdev);
  5184. cik_init_pg(rdev);
  5185. cik_init_cg(rdev);
  5186. WREG32(RLC_LB_CNTR_INIT, 0);
  5187. WREG32(RLC_LB_CNTR_MAX, 0x00008000);
  5188. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5189. WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
  5190. WREG32(RLC_LB_PARAMS, 0x00600408);
  5191. WREG32(RLC_LB_CNTL, 0x80000004);
  5192. WREG32(RLC_MC_CNTL, 0);
  5193. WREG32(RLC_UCODE_CNTL, 0);
  5194. fw_data = (const __be32 *)rdev->rlc_fw->data;
  5195. WREG32(RLC_GPM_UCODE_ADDR, 0);
  5196. for (i = 0; i < size; i++)
  5197. WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
  5198. WREG32(RLC_GPM_UCODE_ADDR, 0);
  5199. /* XXX - find out what chips support lbpw */
  5200. cik_enable_lbpw(rdev, false);
  5201. if (rdev->family == CHIP_BONAIRE)
  5202. WREG32(RLC_DRIVER_DMA_STATUS, 0);
  5203. cik_rlc_start(rdev);
  5204. return 0;
  5205. }
  5206. static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
  5207. {
  5208. u32 data, orig, tmp, tmp2;
  5209. orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
  5210. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
  5211. cik_enable_gui_idle_interrupt(rdev, true);
  5212. tmp = cik_halt_rlc(rdev);
  5213. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5214. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5215. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5216. tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
  5217. WREG32(RLC_SERDES_WR_CTRL, tmp2);
  5218. cik_update_rlc(rdev, tmp);
  5219. data |= CGCG_EN | CGLS_EN;
  5220. } else {
  5221. cik_enable_gui_idle_interrupt(rdev, false);
  5222. RREG32(CB_CGTT_SCLK_CTRL);
  5223. RREG32(CB_CGTT_SCLK_CTRL);
  5224. RREG32(CB_CGTT_SCLK_CTRL);
  5225. RREG32(CB_CGTT_SCLK_CTRL);
  5226. data &= ~(CGCG_EN | CGLS_EN);
  5227. }
  5228. if (orig != data)
  5229. WREG32(RLC_CGCG_CGLS_CTRL, data);
  5230. }
  5231. static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
  5232. {
  5233. u32 data, orig, tmp = 0;
  5234. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
  5235. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
  5236. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
  5237. orig = data = RREG32(CP_MEM_SLP_CNTL);
  5238. data |= CP_MEM_LS_EN;
  5239. if (orig != data)
  5240. WREG32(CP_MEM_SLP_CNTL, data);
  5241. }
  5242. }
  5243. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  5244. data &= 0xfffffffd;
  5245. if (orig != data)
  5246. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  5247. tmp = cik_halt_rlc(rdev);
  5248. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5249. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5250. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5251. data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
  5252. WREG32(RLC_SERDES_WR_CTRL, data);
  5253. cik_update_rlc(rdev, tmp);
  5254. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
  5255. orig = data = RREG32(CGTS_SM_CTRL_REG);
  5256. data &= ~SM_MODE_MASK;
  5257. data |= SM_MODE(0x2);
  5258. data |= SM_MODE_ENABLE;
  5259. data &= ~CGTS_OVERRIDE;
  5260. if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
  5261. (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
  5262. data &= ~CGTS_LS_OVERRIDE;
  5263. data &= ~ON_MONITOR_ADD_MASK;
  5264. data |= ON_MONITOR_ADD_EN;
  5265. data |= ON_MONITOR_ADD(0x96);
  5266. if (orig != data)
  5267. WREG32(CGTS_SM_CTRL_REG, data);
  5268. }
  5269. } else {
  5270. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  5271. data |= 0x00000002;
  5272. if (orig != data)
  5273. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  5274. data = RREG32(RLC_MEM_SLP_CNTL);
  5275. if (data & RLC_MEM_LS_EN) {
  5276. data &= ~RLC_MEM_LS_EN;
  5277. WREG32(RLC_MEM_SLP_CNTL, data);
  5278. }
  5279. data = RREG32(CP_MEM_SLP_CNTL);
  5280. if (data & CP_MEM_LS_EN) {
  5281. data &= ~CP_MEM_LS_EN;
  5282. WREG32(CP_MEM_SLP_CNTL, data);
  5283. }
  5284. orig = data = RREG32(CGTS_SM_CTRL_REG);
  5285. data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
  5286. if (orig != data)
  5287. WREG32(CGTS_SM_CTRL_REG, data);
  5288. tmp = cik_halt_rlc(rdev);
  5289. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5290. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5291. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5292. data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
  5293. WREG32(RLC_SERDES_WR_CTRL, data);
  5294. cik_update_rlc(rdev, tmp);
  5295. }
  5296. }
  5297. static const u32 mc_cg_registers[] =
  5298. {
  5299. MC_HUB_MISC_HUB_CG,
  5300. MC_HUB_MISC_SIP_CG,
  5301. MC_HUB_MISC_VM_CG,
  5302. MC_XPB_CLK_GAT,
  5303. ATC_MISC_CG,
  5304. MC_CITF_MISC_WR_CG,
  5305. MC_CITF_MISC_RD_CG,
  5306. MC_CITF_MISC_VM_CG,
  5307. VM_L2_CG,
  5308. };
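/* The helpers below walk this list of MC hub/client clock gating registers
 * and set or clear MC_CG_ENABLE (medium grain clock gating) resp.
 * MC_LS_ENABLE (memory light sleep) in every one of them, depending on the
 * RADEON_CG_SUPPORT_MC_* flags.
 */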
  5309. static void cik_enable_mc_ls(struct radeon_device *rdev,
  5310. bool enable)
  5311. {
  5312. int i;
  5313. u32 orig, data;
  5314. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  5315. orig = data = RREG32(mc_cg_registers[i]);
  5316. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
  5317. data |= MC_LS_ENABLE;
  5318. else
  5319. data &= ~MC_LS_ENABLE;
  5320. if (data != orig)
  5321. WREG32(mc_cg_registers[i], data);
  5322. }
  5323. }
  5324. static void cik_enable_mc_mgcg(struct radeon_device *rdev,
  5325. bool enable)
  5326. {
  5327. int i;
  5328. u32 orig, data;
  5329. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  5330. orig = data = RREG32(mc_cg_registers[i]);
  5331. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
  5332. data |= MC_CG_ENABLE;
  5333. else
  5334. data &= ~MC_CG_ENABLE;
  5335. if (data != orig)
  5336. WREG32(mc_cg_registers[i], data);
  5337. }
  5338. }
  5339. static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
  5340. bool enable)
  5341. {
  5342. u32 orig, data;
  5343. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
  5344. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
  5345. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
  5346. } else {
  5347. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
  5348. data |= 0xff000000;
  5349. if (data != orig)
  5350. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
  5351. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
  5352. data |= 0xff000000;
  5353. if (data != orig)
  5354. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
  5355. }
  5356. }
  5357. static void cik_enable_sdma_mgls(struct radeon_device *rdev,
  5358. bool enable)
  5359. {
  5360. u32 orig, data;
  5361. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
  5362. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  5363. data |= 0x100;
  5364. if (orig != data)
  5365. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  5366. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  5367. data |= 0x100;
  5368. if (orig != data)
  5369. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  5370. } else {
  5371. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  5372. data &= ~0x100;
  5373. if (orig != data)
  5374. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  5375. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  5376. data &= ~0x100;
  5377. if (orig != data)
  5378. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  5379. }
  5380. }
  5381. static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
  5382. bool enable)
  5383. {
  5384. u32 orig, data;
  5385. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
  5386. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
  5387. data = 0xfff;
  5388. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  5389. orig = data = RREG32(UVD_CGC_CTRL);
  5390. data |= DCM;
  5391. if (orig != data)
  5392. WREG32(UVD_CGC_CTRL, data);
  5393. } else {
  5394. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
  5395. data &= ~0xfff;
  5396. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  5397. orig = data = RREG32(UVD_CGC_CTRL);
  5398. data &= ~DCM;
  5399. if (orig != data)
  5400. WREG32(UVD_CGC_CTRL, data);
  5401. }
  5402. }
  5403. static void cik_enable_bif_mgls(struct radeon_device *rdev,
  5404. bool enable)
  5405. {
  5406. u32 orig, data;
  5407. orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
  5408. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
  5409. data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
  5410. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
  5411. else
  5412. data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
  5413. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
  5414. if (orig != data)
  5415. WREG32_PCIE_PORT(PCIE_CNTL2, data);
  5416. }
  5417. static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
  5418. bool enable)
  5419. {
  5420. u32 orig, data;
  5421. orig = data = RREG32(HDP_HOST_PATH_CNTL);
  5422. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
  5423. data &= ~CLOCK_GATING_DIS;
  5424. else
  5425. data |= CLOCK_GATING_DIS;
  5426. if (orig != data)
  5427. WREG32(HDP_HOST_PATH_CNTL, data);
  5428. }
  5429. static void cik_enable_hdp_ls(struct radeon_device *rdev,
  5430. bool enable)
  5431. {
  5432. u32 orig, data;
  5433. orig = data = RREG32(HDP_MEM_POWER_LS);
  5434. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
  5435. data |= HDP_LS_ENABLE;
  5436. else
  5437. data &= ~HDP_LS_ENABLE;
  5438. if (orig != data)
  5439. WREG32(HDP_MEM_POWER_LS, data);
  5440. }
  5441. void cik_update_cg(struct radeon_device *rdev,
  5442. u32 block, bool enable)
  5443. {
  5444. if (block & RADEON_CG_BLOCK_GFX) {
  5445. cik_enable_gui_idle_interrupt(rdev, false);
  5446. /* order matters! */
  5447. if (enable) {
  5448. cik_enable_mgcg(rdev, true);
  5449. cik_enable_cgcg(rdev, true);
  5450. } else {
  5451. cik_enable_cgcg(rdev, false);
  5452. cik_enable_mgcg(rdev, false);
  5453. }
  5454. cik_enable_gui_idle_interrupt(rdev, true);
  5455. }
  5456. if (block & RADEON_CG_BLOCK_MC) {
  5457. if (!(rdev->flags & RADEON_IS_IGP)) {
  5458. cik_enable_mc_mgcg(rdev, enable);
  5459. cik_enable_mc_ls(rdev, enable);
  5460. }
  5461. }
  5462. if (block & RADEON_CG_BLOCK_SDMA) {
  5463. cik_enable_sdma_mgcg(rdev, enable);
  5464. cik_enable_sdma_mgls(rdev, enable);
  5465. }
  5466. if (block & RADEON_CG_BLOCK_BIF) {
  5467. cik_enable_bif_mgls(rdev, enable);
  5468. }
  5469. if (block & RADEON_CG_BLOCK_UVD) {
  5470. if (rdev->has_uvd)
  5471. cik_enable_uvd_mgcg(rdev, enable);
  5472. }
  5473. if (block & RADEON_CG_BLOCK_HDP) {
  5474. cik_enable_hdp_mgcg(rdev, enable);
  5475. cik_enable_hdp_ls(rdev, enable);
  5476. }
  5477. }
  5478. static void cik_init_cg(struct radeon_device *rdev)
  5479. {
  5480. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
  5481. if (rdev->has_uvd)
  5482. si_init_uvd_internal_cg(rdev);
  5483. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  5484. RADEON_CG_BLOCK_SDMA |
  5485. RADEON_CG_BLOCK_BIF |
  5486. RADEON_CG_BLOCK_UVD |
  5487. RADEON_CG_BLOCK_HDP), true);
  5488. }
  5489. static void cik_fini_cg(struct radeon_device *rdev)
  5490. {
  5491. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  5492. RADEON_CG_BLOCK_SDMA |
  5493. RADEON_CG_BLOCK_BIF |
  5494. RADEON_CG_BLOCK_UVD |
  5495. RADEON_CG_BLOCK_HDP), false);
  5496. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
  5497. }
  5498. static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
  5499. bool enable)
  5500. {
  5501. u32 data, orig;
  5502. orig = data = RREG32(RLC_PG_CNTL);
  5503. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5504. data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5505. else
  5506. data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5507. if (orig != data)
  5508. WREG32(RLC_PG_CNTL, data);
  5509. }
  5510. static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
  5511. bool enable)
  5512. {
  5513. u32 data, orig;
  5514. orig = data = RREG32(RLC_PG_CNTL);
  5515. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5516. data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5517. else
  5518. data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5519. if (orig != data)
  5520. WREG32(RLC_PG_CNTL, data);
  5521. }
  5522. static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
  5523. {
  5524. u32 data, orig;
  5525. orig = data = RREG32(RLC_PG_CNTL);
  5526. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
  5527. data &= ~DISABLE_CP_PG;
  5528. else
  5529. data |= DISABLE_CP_PG;
  5530. if (orig != data)
  5531. WREG32(RLC_PG_CNTL, data);
  5532. }
  5533. static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
  5534. {
  5535. u32 data, orig;
  5536. orig = data = RREG32(RLC_PG_CNTL);
  5537. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
  5538. data &= ~DISABLE_GDS_PG;
  5539. else
  5540. data |= DISABLE_GDS_PG;
  5541. if (orig != data)
  5542. WREG32(RLC_PG_CNTL, data);
  5543. }
  5544. #define CP_ME_TABLE_SIZE 96
  5545. #define CP_ME_TABLE_OFFSET 2048
  5546. #define CP_MEC_TABLE_OFFSET 4096
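/* cik_init_cp_pg_table() below fills the RLC's CP table buffer, whose GPU
 * address is later programmed into RLC_CP_TABLE_RESTORE (see
 * cik_init_gfx_cgpg()).  For each microengine -- CE, PFP, ME and MEC, plus
 * a second MEC entry on Kaveri -- it copies CP_ME_TABLE_SIZE dwords starting
 * at the table offset inside the firmware image, packed back to back.
 */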
  5547. void cik_init_cp_pg_table(struct radeon_device *rdev)
  5548. {
  5549. const __be32 *fw_data;
  5550. volatile u32 *dst_ptr;
  5551. int me, i, max_me = 4;
  5552. u32 bo_offset = 0;
  5553. u32 table_offset;
  5554. if (rdev->family == CHIP_KAVERI)
  5555. max_me = 5;
  5556. if (rdev->rlc.cp_table_ptr == NULL)
  5557. return;
  5558. /* write the cp table buffer */
  5559. dst_ptr = rdev->rlc.cp_table_ptr;
  5560. for (me = 0; me < max_me; me++) {
  5561. if (me == 0) {
  5562. fw_data = (const __be32 *)rdev->ce_fw->data;
  5563. table_offset = CP_ME_TABLE_OFFSET;
  5564. } else if (me == 1) {
  5565. fw_data = (const __be32 *)rdev->pfp_fw->data;
  5566. table_offset = CP_ME_TABLE_OFFSET;
  5567. } else if (me == 2) {
  5568. fw_data = (const __be32 *)rdev->me_fw->data;
  5569. table_offset = CP_ME_TABLE_OFFSET;
  5570. } else {
  5571. fw_data = (const __be32 *)rdev->mec_fw->data;
  5572. table_offset = CP_MEC_TABLE_OFFSET;
  5573. }
  5574. for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
  5575. dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
  5576. }
  5577. bo_offset += CP_ME_TABLE_SIZE;
  5578. }
  5579. }
  5580. static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
  5581. bool enable)
  5582. {
  5583. u32 data, orig;
  5584. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
  5585. orig = data = RREG32(RLC_PG_CNTL);
  5586. data |= GFX_PG_ENABLE;
  5587. if (orig != data)
  5588. WREG32(RLC_PG_CNTL, data);
  5589. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5590. data |= AUTO_PG_EN;
  5591. if (orig != data)
  5592. WREG32(RLC_AUTO_PG_CTRL, data);
  5593. } else {
  5594. orig = data = RREG32(RLC_PG_CNTL);
  5595. data &= ~GFX_PG_ENABLE;
  5596. if (orig != data)
  5597. WREG32(RLC_PG_CNTL, data);
  5598. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5599. data &= ~AUTO_PG_EN;
  5600. if (orig != data)
  5601. WREG32(RLC_AUTO_PG_CTRL, data);
  5602. data = RREG32(DB_RENDER_CONTROL);
  5603. }
  5604. }
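/* cik_get_cu_active_bitmap() merges the hardware and user shader array
 * config for one SE/SH (both keep their disable bits in the upper 16 bits,
 * hence the masking and shift below) and returns the complement limited to
 * max_cu_per_sh, i.e. a bitmap with one bit set per usable compute unit.
 */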
  5605. static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
  5606. {
  5607. u32 mask = 0, tmp, tmp1;
  5608. int i;
  5609. cik_select_se_sh(rdev, se, sh);
  5610. tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
  5611. tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
  5612. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5613. tmp &= 0xffff0000;
  5614. tmp |= tmp1;
  5615. tmp >>= 16;
  5616. for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
  5617. mask <<= 1;
  5618. mask |= 1;
  5619. }
  5620. return (~tmp) & mask;
  5621. }
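/* cik_init_ao_cu_mask() selects up to two "always on" CUs per SE/SH from
 * that bitmap and packs them into RLC_PG_AO_CU_MASK (one byte per SH, two
 * per SE), then writes the total active CU count into RLC_MAX_PG_CU,
 * presumably as the upper bound for per-CU power gating.
 */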
  5622. static void cik_init_ao_cu_mask(struct radeon_device *rdev)
  5623. {
  5624. u32 i, j, k, active_cu_number = 0;
  5625. u32 mask, counter, cu_bitmap;
  5626. u32 tmp = 0;
  5627. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  5628. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  5629. mask = 1;
  5630. cu_bitmap = 0;
  5631. counter = 0;
  5632. for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
  5633. if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
  5634. if (counter < 2)
  5635. cu_bitmap |= mask;
  5636. counter ++;
  5637. }
  5638. mask <<= 1;
  5639. }
  5640. active_cu_number += counter;
  5641. tmp |= (cu_bitmap << (i * 16 + j * 8));
  5642. }
  5643. }
  5644. WREG32(RLC_PG_AO_CU_MASK, tmp);
  5645. tmp = RREG32(RLC_MAX_PG_CU);
  5646. tmp &= ~MAX_PU_CU_MASK;
  5647. tmp |= MAX_PU_CU(active_cu_number);
  5648. WREG32(RLC_MAX_PG_CU, tmp);
  5649. }
  5650. static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
  5651. bool enable)
  5652. {
  5653. u32 data, orig;
  5654. orig = data = RREG32(RLC_PG_CNTL);
  5655. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
  5656. data |= STATIC_PER_CU_PG_ENABLE;
  5657. else
  5658. data &= ~STATIC_PER_CU_PG_ENABLE;
  5659. if (orig != data)
  5660. WREG32(RLC_PG_CNTL, data);
  5661. }
  5662. static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
  5663. bool enable)
  5664. {
  5665. u32 data, orig;
  5666. orig = data = RREG32(RLC_PG_CNTL);
  5667. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
  5668. data |= DYN_PER_CU_PG_ENABLE;
  5669. else
  5670. data &= ~DYN_PER_CU_PG_ENABLE;
  5671. if (orig != data)
  5672. WREG32(RLC_PG_CNTL, data);
  5673. }
  5674. #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
  5675. #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
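/* cik_init_gfx_cgpg() seeds the RLC GPM scratch memory using the two offsets
 * above: the clear state descriptor (address hi/lo plus size, or zeros when
 * there is no cs_data) at 0x3D, and the save/restore register list at 0x90.
 * It then points the RLC at the save/restore and CP table buffers and tunes
 * the auto power gating thresholds.
 */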
  5676. static void cik_init_gfx_cgpg(struct radeon_device *rdev)
  5677. {
  5678. u32 data, orig;
  5679. u32 i;
  5680. if (rdev->rlc.cs_data) {
  5681. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  5682. WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
  5683. WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
  5684. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
  5685. } else {
  5686. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  5687. for (i = 0; i < 3; i++)
  5688. WREG32(RLC_GPM_SCRATCH_DATA, 0);
  5689. }
  5690. if (rdev->rlc.reg_list) {
  5691. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
  5692. for (i = 0; i < rdev->rlc.reg_list_size; i++)
  5693. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
  5694. }
  5695. orig = data = RREG32(RLC_PG_CNTL);
  5696. data |= GFX_PG_SRC;
  5697. if (orig != data)
  5698. WREG32(RLC_PG_CNTL, data);
  5699. WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
  5700. WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
  5701. data = RREG32(CP_RB_WPTR_POLL_CNTL);
  5702. data &= ~IDLE_POLL_COUNT_MASK;
  5703. data |= IDLE_POLL_COUNT(0x60);
  5704. WREG32(CP_RB_WPTR_POLL_CNTL, data);
  5705. data = 0x10101010;
  5706. WREG32(RLC_PG_DELAY, data);
  5707. data = RREG32(RLC_PG_DELAY_2);
  5708. data &= ~0xff;
  5709. data |= 0x3;
  5710. WREG32(RLC_PG_DELAY_2, data);
  5711. data = RREG32(RLC_AUTO_PG_CTRL);
  5712. data &= ~GRBM_REG_SGIT_MASK;
  5713. data |= GRBM_REG_SGIT(0x700);
  5714. WREG32(RLC_AUTO_PG_CTRL, data);
  5715. }
  5716. static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
  5717. {
  5718. cik_enable_gfx_cgpg(rdev, enable);
  5719. cik_enable_gfx_static_mgpg(rdev, enable);
  5720. cik_enable_gfx_dynamic_mgpg(rdev, enable);
  5721. }
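/* The clear state buffer (CSB) is the command stream replayed by the
 * CLEAR_STATE packet to reinitialize context state.  cik_get_csb_size()
 * only counts dwords; cik_get_csb_buffer() emits them: PREAMBLE begin, a
 * CONTEXT_CONTROL packet, one SET_CONTEXT_REG packet per SECT_CONTEXT
 * extent of rlc.cs_data, the chip specific PA_SC_RASTER_CONFIG pair,
 * PREAMBLE end and finally CLEAR_STATE itself.
 */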
  5722. u32 cik_get_csb_size(struct radeon_device *rdev)
  5723. {
  5724. u32 count = 0;
  5725. const struct cs_section_def *sect = NULL;
  5726. const struct cs_extent_def *ext = NULL;
  5727. if (rdev->rlc.cs_data == NULL)
  5728. return 0;
  5729. /* begin clear state */
  5730. count += 2;
  5731. /* context control state */
  5732. count += 3;
  5733. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  5734. for (ext = sect->section; ext->extent != NULL; ++ext) {
  5735. if (sect->id == SECT_CONTEXT)
  5736. count += 2 + ext->reg_count;
  5737. else
  5738. return 0;
  5739. }
  5740. }
  5741. /* pa_sc_raster_config/pa_sc_raster_config1 */
  5742. count += 4;
  5743. /* end clear state */
  5744. count += 2;
  5745. /* clear state */
  5746. count += 2;
  5747. return count;
  5748. }
  5749. void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
  5750. {
  5751. u32 count = 0, i;
  5752. const struct cs_section_def *sect = NULL;
  5753. const struct cs_extent_def *ext = NULL;
  5754. if (rdev->rlc.cs_data == NULL)
  5755. return;
  5756. if (buffer == NULL)
  5757. return;
  5758. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  5759. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  5760. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  5761. buffer[count++] = cpu_to_le32(0x80000000);
  5762. buffer[count++] = cpu_to_le32(0x80000000);
  5763. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  5764. for (ext = sect->section; ext->extent != NULL; ++ext) {
  5765. if (sect->id == SECT_CONTEXT) {
  5766. buffer[count++] =
  5767. cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
  5768. buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
  5769. for (i = 0; i < ext->reg_count; i++)
  5770. buffer[count++] = cpu_to_le32(ext->extent[i]);
  5771. } else {
  5772. return;
  5773. }
  5774. }
  5775. }
  5776. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  5777. buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
  5778. switch (rdev->family) {
  5779. case CHIP_BONAIRE:
  5780. buffer[count++] = cpu_to_le32(0x16000012);
  5781. buffer[count++] = cpu_to_le32(0x00000000);
  5782. break;
  5783. case CHIP_KAVERI:
  5784. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  5785. buffer[count++] = cpu_to_le32(0x00000000);
  5786. break;
  5787. case CHIP_KABINI:
  5788. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  5789. buffer[count++] = cpu_to_le32(0x00000000);
  5790. break;
  5791. case CHIP_HAWAII:
5792. buffer[count++] = cpu_to_le32(0x3a00161a);
5793. buffer[count++] = cpu_to_le32(0x0000002e);
  5794. break;
  5795. default:
  5796. buffer[count++] = cpu_to_le32(0x00000000);
  5797. buffer[count++] = cpu_to_le32(0x00000000);
  5798. break;
  5799. }
  5800. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  5801. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
  5802. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
  5803. buffer[count++] = cpu_to_le32(0);
  5804. }
  5805. static void cik_init_pg(struct radeon_device *rdev)
  5806. {
  5807. if (rdev->pg_flags) {
  5808. cik_enable_sck_slowdown_on_pu(rdev, true);
  5809. cik_enable_sck_slowdown_on_pd(rdev, true);
  5810. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
  5811. cik_init_gfx_cgpg(rdev);
  5812. cik_enable_cp_pg(rdev, true);
  5813. cik_enable_gds_pg(rdev, true);
  5814. }
  5815. cik_init_ao_cu_mask(rdev);
  5816. cik_update_gfx_pg(rdev, true);
  5817. }
  5818. }
  5819. static void cik_fini_pg(struct radeon_device *rdev)
  5820. {
  5821. if (rdev->pg_flags) {
  5822. cik_update_gfx_pg(rdev, false);
  5823. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
  5824. cik_enable_cp_pg(rdev, false);
  5825. cik_enable_gds_pg(rdev, false);
  5826. }
  5827. }
  5828. }
  5829. /*
  5830. * Interrupts
  5831. * Starting with r6xx, interrupts are handled via a ring buffer.
  5832. * Ring buffers are areas of GPU accessible memory that the GPU
  5833. * writes interrupt vectors into and the host reads vectors out of.
  5834. * There is a rptr (read pointer) that determines where the
  5835. * host is currently reading, and a wptr (write pointer)
  5836. * which determines where the GPU has written. When the
  5837. * pointers are equal, the ring is idle. When the GPU
  5838. * writes vectors to the ring buffer, it increments the
  5839. * wptr. When there is an interrupt, the host then starts
  5840. * fetching commands and processing them until the pointers are
  5841. * equal again at which point it updates the rptr.
  5842. */
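/* In pseudo-code, the host side of that protocol (implemented by
 * cik_irq_process() further below) is roughly:
 *
 *	wptr = cik_get_ih_wptr(rdev);
 *	rptr = rdev->ih.rptr;
 *	while (rptr != wptr) {
 *		decode the 16 byte vector at rdev->ih.ring + rptr;
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	rdev->ih.rptr = rptr;
 *	WREG32(IH_RB_RPTR, rptr);
 */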
  5843. /**
  5844. * cik_enable_interrupts - Enable the interrupt ring buffer
  5845. *
  5846. * @rdev: radeon_device pointer
  5847. *
  5848. * Enable the interrupt ring buffer (CIK).
  5849. */
  5850. static void cik_enable_interrupts(struct radeon_device *rdev)
  5851. {
  5852. u32 ih_cntl = RREG32(IH_CNTL);
  5853. u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
  5854. ih_cntl |= ENABLE_INTR;
  5855. ih_rb_cntl |= IH_RB_ENABLE;
  5856. WREG32(IH_CNTL, ih_cntl);
  5857. WREG32(IH_RB_CNTL, ih_rb_cntl);
  5858. rdev->ih.enabled = true;
  5859. }
  5860. /**
  5861. * cik_disable_interrupts - Disable the interrupt ring buffer
  5862. *
  5863. * @rdev: radeon_device pointer
  5864. *
  5865. * Disable the interrupt ring buffer (CIK).
  5866. */
  5867. static void cik_disable_interrupts(struct radeon_device *rdev)
  5868. {
  5869. u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
  5870. u32 ih_cntl = RREG32(IH_CNTL);
  5871. ih_rb_cntl &= ~IH_RB_ENABLE;
  5872. ih_cntl &= ~ENABLE_INTR;
  5873. WREG32(IH_RB_CNTL, ih_rb_cntl);
  5874. WREG32(IH_CNTL, ih_cntl);
  5875. /* set rptr, wptr to 0 */
  5876. WREG32(IH_RB_RPTR, 0);
  5877. WREG32(IH_RB_WPTR, 0);
  5878. rdev->ih.enabled = false;
  5879. rdev->ih.rptr = 0;
  5880. }
  5881. /**
  5882. * cik_disable_interrupt_state - Disable all interrupt sources
  5883. *
  5884. * @rdev: radeon_device pointer
  5885. *
  5886. * Clear all interrupt enable bits used by the driver (CIK).
  5887. */
  5888. static void cik_disable_interrupt_state(struct radeon_device *rdev)
  5889. {
  5890. u32 tmp;
  5891. /* gfx ring */
  5892. tmp = RREG32(CP_INT_CNTL_RING0) &
  5893. (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5894. WREG32(CP_INT_CNTL_RING0, tmp);
  5895. /* sdma */
  5896. tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
  5897. WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  5898. tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
  5899. WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  5900. /* compute queues */
  5901. WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
  5902. WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
  5903. WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
  5904. WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
  5905. WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
  5906. WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
  5907. WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
  5908. WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
  5909. /* grbm */
  5910. WREG32(GRBM_INT_CNTL, 0);
  5911. /* vline/vblank, etc. */
  5912. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
  5913. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
  5914. if (rdev->num_crtc >= 4) {
  5915. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
  5916. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
  5917. }
  5918. if (rdev->num_crtc >= 6) {
  5919. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
  5920. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
  5921. }
  5922. /* dac hotplug */
  5923. WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
  5924. /* digital hotplug */
  5925. tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5926. WREG32(DC_HPD1_INT_CONTROL, tmp);
  5927. tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5928. WREG32(DC_HPD2_INT_CONTROL, tmp);
  5929. tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5930. WREG32(DC_HPD3_INT_CONTROL, tmp);
  5931. tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5932. WREG32(DC_HPD4_INT_CONTROL, tmp);
  5933. tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5934. WREG32(DC_HPD5_INT_CONTROL, tmp);
  5935. tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  5936. WREG32(DC_HPD6_INT_CONTROL, tmp);
  5937. }
  5938. /**
  5939. * cik_irq_init - init and enable the interrupt ring
  5940. *
  5941. * @rdev: radeon_device pointer
  5942. *
  5943. * Allocate a ring buffer for the interrupt controller,
5944. * enable the RLC, disable interrupts, set up the IH
5945. * ring buffer, and enable it (CIK).
5946. * Called at device load and resume.
  5947. * Returns 0 for success, errors for failure.
  5948. */
  5949. static int cik_irq_init(struct radeon_device *rdev)
  5950. {
  5951. int ret = 0;
  5952. int rb_bufsz;
  5953. u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
  5954. /* allocate ring */
  5955. ret = r600_ih_ring_alloc(rdev);
  5956. if (ret)
  5957. return ret;
  5958. /* disable irqs */
  5959. cik_disable_interrupts(rdev);
  5960. /* init rlc */
  5961. ret = cik_rlc_resume(rdev);
  5962. if (ret) {
  5963. r600_ih_ring_fini(rdev);
  5964. return ret;
  5965. }
  5966. /* setup interrupt control */
  5967. /* XXX this should actually be a bus address, not an MC address. same on older asics */
  5968. WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
  5969. interrupt_cntl = RREG32(INTERRUPT_CNTL);
  5970. /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
  5971. * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
  5972. */
  5973. interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
  5974. /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
  5975. interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
  5976. WREG32(INTERRUPT_CNTL, interrupt_cntl);
  5977. WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
  5978. rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
  5979. ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
  5980. IH_WPTR_OVERFLOW_CLEAR |
  5981. (rb_bufsz << 1));
  5982. if (rdev->wb.enabled)
  5983. ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
  5984. /* set the writeback address whether it's enabled or not */
  5985. WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
  5986. WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
  5987. WREG32(IH_RB_CNTL, ih_rb_cntl);
  5988. /* set rptr, wptr to 0 */
  5989. WREG32(IH_RB_RPTR, 0);
  5990. WREG32(IH_RB_WPTR, 0);
  5991. /* Default settings for IH_CNTL (disabled at first) */
  5992. ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
  5993. /* RPTR_REARM only works if msi's are enabled */
  5994. if (rdev->msi_enabled)
  5995. ih_cntl |= RPTR_REARM;
  5996. WREG32(IH_CNTL, ih_cntl);
  5997. /* force the active interrupt state to all disabled */
  5998. cik_disable_interrupt_state(rdev);
  5999. pci_set_master(rdev->pdev);
  6000. /* enable irqs */
  6001. cik_enable_interrupts(rdev);
  6002. return ret;
  6003. }
  6004. /**
  6005. * cik_irq_set - enable/disable interrupt sources
  6006. *
  6007. * @rdev: radeon_device pointer
  6008. *
  6009. * Enable interrupt sources on the GPU (vblanks, hpd,
  6010. * etc.) (CIK).
  6011. * Returns 0 for success, errors for failure.
  6012. */
  6013. int cik_irq_set(struct radeon_device *rdev)
  6014. {
  6015. u32 cp_int_cntl;
  6016. u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
  6017. u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
  6018. u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
  6019. u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
  6020. u32 grbm_int_cntl = 0;
  6021. u32 dma_cntl, dma_cntl1;
  6022. u32 thermal_int;
  6023. if (!rdev->irq.installed) {
  6024. WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
  6025. return -EINVAL;
  6026. }
  6027. /* don't enable anything if the ih is disabled */
  6028. if (!rdev->ih.enabled) {
  6029. cik_disable_interrupts(rdev);
  6030. /* force the active interrupt state to all disabled */
  6031. cik_disable_interrupt_state(rdev);
  6032. return 0;
  6033. }
  6034. cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
  6035. (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  6036. cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
  6037. hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6038. hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6039. hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6040. hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6041. hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6042. hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
  6043. dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6044. dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6045. cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6046. cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6047. cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6048. cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6049. cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6050. cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6051. cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6052. cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6053. if (rdev->flags & RADEON_IS_IGP)
  6054. thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
  6055. ~(THERM_INTH_MASK | THERM_INTL_MASK);
  6056. else
  6057. thermal_int = RREG32_SMC(CG_THERMAL_INT) &
  6058. ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
  6059. /* enable CP interrupts on all rings */
  6060. if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
  6061. DRM_DEBUG("cik_irq_set: sw int gfx\n");
  6062. cp_int_cntl |= TIME_STAMP_INT_ENABLE;
  6063. }
  6064. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
  6065. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6066. DRM_DEBUG("cik_irq_set: sw int cp1\n");
  6067. if (ring->me == 1) {
  6068. switch (ring->pipe) {
  6069. case 0:
  6070. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  6071. break;
  6072. case 1:
  6073. cp_m1p1 |= TIME_STAMP_INT_ENABLE;
  6074. break;
  6075. case 2:
  6076. cp_m1p2 |= TIME_STAMP_INT_ENABLE;
  6077. break;
  6078. case 3:
6079. cp_m1p3 |= TIME_STAMP_INT_ENABLE;
  6080. break;
  6081. default:
6082. DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
  6083. break;
  6084. }
  6085. } else if (ring->me == 2) {
  6086. switch (ring->pipe) {
  6087. case 0:
  6088. cp_m2p0 |= TIME_STAMP_INT_ENABLE;
  6089. break;
  6090. case 1:
  6091. cp_m2p1 |= TIME_STAMP_INT_ENABLE;
  6092. break;
  6093. case 2:
  6094. cp_m2p2 |= TIME_STAMP_INT_ENABLE;
  6095. break;
  6096. case 3:
6097. cp_m2p3 |= TIME_STAMP_INT_ENABLE;
  6098. break;
  6099. default:
6100. DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
  6101. break;
  6102. }
  6103. } else {
6104. DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
  6105. }
  6106. }
  6107. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
  6108. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6109. DRM_DEBUG("cik_irq_set: sw int cp2\n");
  6110. if (ring->me == 1) {
  6111. switch (ring->pipe) {
  6112. case 0:
  6113. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  6114. break;
  6115. case 1:
  6116. cp_m1p1 |= TIME_STAMP_INT_ENABLE;
  6117. break;
  6118. case 2:
  6119. cp_m1p2 |= TIME_STAMP_INT_ENABLE;
  6120. break;
  6121. case 3:
6122. cp_m1p3 |= TIME_STAMP_INT_ENABLE;
  6123. break;
  6124. default:
6125. DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
  6126. break;
  6127. }
  6128. } else if (ring->me == 2) {
  6129. switch (ring->pipe) {
  6130. case 0:
  6131. cp_m2p0 |= TIME_STAMP_INT_ENABLE;
  6132. break;
  6133. case 1:
  6134. cp_m2p1 |= TIME_STAMP_INT_ENABLE;
  6135. break;
  6136. case 2:
  6137. cp_m2p2 |= TIME_STAMP_INT_ENABLE;
  6138. break;
  6139. case 3:
6140. cp_m2p3 |= TIME_STAMP_INT_ENABLE;
  6141. break;
  6142. default:
6143. DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
  6144. break;
  6145. }
  6146. } else {
6147. DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
  6148. }
  6149. }
  6150. if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
  6151. DRM_DEBUG("cik_irq_set: sw int dma\n");
  6152. dma_cntl |= TRAP_ENABLE;
  6153. }
  6154. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
  6155. DRM_DEBUG("cik_irq_set: sw int dma1\n");
  6156. dma_cntl1 |= TRAP_ENABLE;
  6157. }
  6158. if (rdev->irq.crtc_vblank_int[0] ||
  6159. atomic_read(&rdev->irq.pflip[0])) {
  6160. DRM_DEBUG("cik_irq_set: vblank 0\n");
  6161. crtc1 |= VBLANK_INTERRUPT_MASK;
  6162. }
  6163. if (rdev->irq.crtc_vblank_int[1] ||
  6164. atomic_read(&rdev->irq.pflip[1])) {
  6165. DRM_DEBUG("cik_irq_set: vblank 1\n");
  6166. crtc2 |= VBLANK_INTERRUPT_MASK;
  6167. }
  6168. if (rdev->irq.crtc_vblank_int[2] ||
  6169. atomic_read(&rdev->irq.pflip[2])) {
  6170. DRM_DEBUG("cik_irq_set: vblank 2\n");
  6171. crtc3 |= VBLANK_INTERRUPT_MASK;
  6172. }
  6173. if (rdev->irq.crtc_vblank_int[3] ||
  6174. atomic_read(&rdev->irq.pflip[3])) {
  6175. DRM_DEBUG("cik_irq_set: vblank 3\n");
  6176. crtc4 |= VBLANK_INTERRUPT_MASK;
  6177. }
  6178. if (rdev->irq.crtc_vblank_int[4] ||
  6179. atomic_read(&rdev->irq.pflip[4])) {
  6180. DRM_DEBUG("cik_irq_set: vblank 4\n");
  6181. crtc5 |= VBLANK_INTERRUPT_MASK;
  6182. }
  6183. if (rdev->irq.crtc_vblank_int[5] ||
  6184. atomic_read(&rdev->irq.pflip[5])) {
  6185. DRM_DEBUG("cik_irq_set: vblank 5\n");
  6186. crtc6 |= VBLANK_INTERRUPT_MASK;
  6187. }
  6188. if (rdev->irq.hpd[0]) {
  6189. DRM_DEBUG("cik_irq_set: hpd 1\n");
  6190. hpd1 |= DC_HPDx_INT_EN;
  6191. }
  6192. if (rdev->irq.hpd[1]) {
  6193. DRM_DEBUG("cik_irq_set: hpd 2\n");
  6194. hpd2 |= DC_HPDx_INT_EN;
  6195. }
  6196. if (rdev->irq.hpd[2]) {
  6197. DRM_DEBUG("cik_irq_set: hpd 3\n");
  6198. hpd3 |= DC_HPDx_INT_EN;
  6199. }
  6200. if (rdev->irq.hpd[3]) {
  6201. DRM_DEBUG("cik_irq_set: hpd 4\n");
  6202. hpd4 |= DC_HPDx_INT_EN;
  6203. }
  6204. if (rdev->irq.hpd[4]) {
  6205. DRM_DEBUG("cik_irq_set: hpd 5\n");
  6206. hpd5 |= DC_HPDx_INT_EN;
  6207. }
  6208. if (rdev->irq.hpd[5]) {
  6209. DRM_DEBUG("cik_irq_set: hpd 6\n");
  6210. hpd6 |= DC_HPDx_INT_EN;
  6211. }
  6212. if (rdev->irq.dpm_thermal) {
  6213. DRM_DEBUG("dpm thermal\n");
  6214. if (rdev->flags & RADEON_IS_IGP)
  6215. thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
  6216. else
  6217. thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
  6218. }
  6219. WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
  6220. WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
  6221. WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
  6222. WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
  6223. WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
  6224. WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
  6225. WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
  6226. WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
  6227. WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
  6228. WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
  6229. WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
  6230. WREG32(GRBM_INT_CNTL, grbm_int_cntl);
  6231. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
  6232. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
  6233. if (rdev->num_crtc >= 4) {
  6234. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
  6235. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
  6236. }
  6237. if (rdev->num_crtc >= 6) {
  6238. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
  6239. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
  6240. }
  6241. WREG32(DC_HPD1_INT_CONTROL, hpd1);
  6242. WREG32(DC_HPD2_INT_CONTROL, hpd2);
  6243. WREG32(DC_HPD3_INT_CONTROL, hpd3);
  6244. WREG32(DC_HPD4_INT_CONTROL, hpd4);
  6245. WREG32(DC_HPD5_INT_CONTROL, hpd5);
  6246. WREG32(DC_HPD6_INT_CONTROL, hpd6);
  6247. if (rdev->flags & RADEON_IS_IGP)
  6248. WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
  6249. else
  6250. WREG32_SMC(CG_THERMAL_INT, thermal_int);
  6251. return 0;
  6252. }
  6253. /**
  6254. * cik_irq_ack - ack interrupt sources
  6255. *
  6256. * @rdev: radeon_device pointer
  6257. *
  6258. * Ack interrupt sources on the GPU (vblanks, hpd,
6259. * etc.) (CIK). Certain interrupt sources are sw
  6260. * generated and do not require an explicit ack.
  6261. */
  6262. static inline void cik_irq_ack(struct radeon_device *rdev)
  6263. {
  6264. u32 tmp;
  6265. rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
  6266. rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
  6267. rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
  6268. rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
  6269. rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
  6270. rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
  6271. rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
  6272. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
  6273. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
  6274. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
  6275. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
  6276. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
  6277. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
  6278. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
  6279. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
  6280. if (rdev->num_crtc >= 4) {
  6281. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
  6282. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
  6283. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
  6284. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
  6285. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
  6286. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
  6287. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
  6288. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
  6289. }
  6290. if (rdev->num_crtc >= 6) {
  6291. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
  6292. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
  6293. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
  6294. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
  6295. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
  6296. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
  6297. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
  6298. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
  6299. }
  6300. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
  6301. tmp = RREG32(DC_HPD1_INT_CONTROL);
  6302. tmp |= DC_HPDx_INT_ACK;
  6303. WREG32(DC_HPD1_INT_CONTROL, tmp);
  6304. }
  6305. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
  6306. tmp = RREG32(DC_HPD2_INT_CONTROL);
  6307. tmp |= DC_HPDx_INT_ACK;
  6308. WREG32(DC_HPD2_INT_CONTROL, tmp);
  6309. }
  6310. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
  6311. tmp = RREG32(DC_HPD3_INT_CONTROL);
  6312. tmp |= DC_HPDx_INT_ACK;
  6313. WREG32(DC_HPD3_INT_CONTROL, tmp);
  6314. }
  6315. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
  6316. tmp = RREG32(DC_HPD4_INT_CONTROL);
  6317. tmp |= DC_HPDx_INT_ACK;
  6318. WREG32(DC_HPD4_INT_CONTROL, tmp);
  6319. }
  6320. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
  6321. tmp = RREG32(DC_HPD5_INT_CONTROL);
  6322. tmp |= DC_HPDx_INT_ACK;
  6323. WREG32(DC_HPD5_INT_CONTROL, tmp);
  6324. }
  6325. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6326. tmp = RREG32(DC_HPD6_INT_CONTROL);
  6327. tmp |= DC_HPDx_INT_ACK;
  6328. WREG32(DC_HPD6_INT_CONTROL, tmp);
  6329. }
  6330. }
  6331. /**
  6332. * cik_irq_disable - disable interrupts
  6333. *
  6334. * @rdev: radeon_device pointer
  6335. *
  6336. * Disable interrupts on the hw (CIK).
  6337. */
  6338. static void cik_irq_disable(struct radeon_device *rdev)
  6339. {
  6340. cik_disable_interrupts(rdev);
  6341. /* Wait and acknowledge irq */
  6342. mdelay(1);
  6343. cik_irq_ack(rdev);
  6344. cik_disable_interrupt_state(rdev);
  6345. }
  6346. /**
6347. * cik_irq_suspend - disable interrupts for suspend
  6348. *
  6349. * @rdev: radeon_device pointer
  6350. *
  6351. * Disable interrupts and stop the RLC (CIK).
  6352. * Used for suspend.
  6353. */
  6354. static void cik_irq_suspend(struct radeon_device *rdev)
  6355. {
  6356. cik_irq_disable(rdev);
  6357. cik_rlc_stop(rdev);
  6358. }
  6359. /**
  6360. * cik_irq_fini - tear down interrupt support
  6361. *
  6362. * @rdev: radeon_device pointer
  6363. *
  6364. * Disable interrupts on the hw and free the IH ring
  6365. * buffer (CIK).
  6366. * Used for driver unload.
  6367. */
  6368. static void cik_irq_fini(struct radeon_device *rdev)
  6369. {
  6370. cik_irq_suspend(rdev);
  6371. r600_ih_ring_fini(rdev);
  6372. }
  6373. /**
  6374. * cik_get_ih_wptr - get the IH ring buffer wptr
  6375. *
  6376. * @rdev: radeon_device pointer
  6377. *
  6378. * Get the IH ring buffer wptr from either the register
  6379. * or the writeback memory buffer (CIK). Also check for
  6380. * ring buffer overflow and deal with it.
  6381. * Used by cik_irq_process().
  6382. * Returns the value of the wptr.
  6383. */
  6384. static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
  6385. {
  6386. u32 wptr, tmp;
  6387. if (rdev->wb.enabled)
  6388. wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
  6389. else
  6390. wptr = RREG32(IH_RB_WPTR);
  6391. if (wptr & RB_OVERFLOW) {
6392. /* When a ring buffer overflow happens, start parsing interrupts
6393. * from the last vector that was not overwritten (wptr + 16).
6394. * Hopefully this should allow us to catch up.
6395. */
6396. dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
6397. wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
  6398. rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
  6399. tmp = RREG32(IH_RB_CNTL);
  6400. tmp |= IH_WPTR_OVERFLOW_CLEAR;
  6401. WREG32(IH_RB_CNTL, tmp);
  6402. }
  6403. return (wptr & rdev->ih.ptr_mask);
  6404. }
  6405. /* CIK IV Ring
  6406. * Each IV ring entry is 128 bits:
  6407. * [7:0] - interrupt source id
  6408. * [31:8] - reserved
  6409. * [59:32] - interrupt source data
  6410. * [63:60] - reserved
  6411. * [71:64] - RINGID
  6412. * CP:
  6413. * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
  6414. * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
  6415. * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
  6416. * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
  6417. * PIPE_ID - ME0 0=3D
  6418. * - ME1&2 compute dispatcher (4 pipes each)
  6419. * SDMA:
  6420. * INSTANCE_ID [1:0], QUEUE_ID[1:0]
  6421. * INSTANCE_ID - 0 = sdma0, 1 = sdma1
  6422. * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
  6423. * [79:72] - VMID
  6424. * [95:80] - PASID
  6425. * [127:96] - reserved
  6426. */
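/* As a sketch, one such vector is pulled apart with the same masks
 * cik_irq_process() uses below (rptr is in bytes, the ring is an array of
 * little-endian dwords):
 *
 *	ring_index = rptr / 4;
 *	src_id   = le32_to_cpu(ih.ring[ring_index + 0]) & 0xff;
 *	src_data = le32_to_cpu(ih.ring[ring_index + 1]) & 0xfffffff;
 *	ring_id  = le32_to_cpu(ih.ring[ring_index + 2]) & 0xff;
 *
 * The VMID/PASID bits in the upper part of dword 2 and the reserved dword 3
 * are not looked at by this handler.
 */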
  6427. /**
  6428. * cik_irq_process - interrupt handler
  6429. *
  6430. * @rdev: radeon_device pointer
  6431. *
6432. * Interrupt handler (CIK). Walk the IH ring,
  6433. * ack interrupts and schedule work to handle
  6434. * interrupt events.
  6435. * Returns irq process return code.
  6436. */
  6437. int cik_irq_process(struct radeon_device *rdev)
  6438. {
  6439. struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  6440. struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  6441. u32 wptr;
  6442. u32 rptr;
  6443. u32 src_id, src_data, ring_id;
  6444. u8 me_id, pipe_id, queue_id;
  6445. u32 ring_index;
  6446. bool queue_hotplug = false;
  6447. bool queue_reset = false;
  6448. u32 addr, status, mc_client;
  6449. bool queue_thermal = false;
  6450. if (!rdev->ih.enabled || rdev->shutdown)
  6451. return IRQ_NONE;
  6452. wptr = cik_get_ih_wptr(rdev);
  6453. restart_ih:
  6454. /* is somebody else already processing irqs? */
  6455. if (atomic_xchg(&rdev->ih.lock, 1))
  6456. return IRQ_NONE;
  6457. rptr = rdev->ih.rptr;
  6458. DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
  6459. /* Order reading of wptr vs. reading of IH ring data */
  6460. rmb();
  6461. /* display interrupts */
  6462. cik_irq_ack(rdev);
  6463. while (rptr != wptr) {
  6464. /* wptr/rptr are in bytes! */
  6465. ring_index = rptr / 4;
  6466. src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
  6467. src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
  6468. ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
  6469. switch (src_id) {
  6470. case 1: /* D1 vblank/vline */
  6471. switch (src_data) {
  6472. case 0: /* D1 vblank */
  6473. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
  6474. if (rdev->irq.crtc_vblank_int[0]) {
  6475. drm_handle_vblank(rdev->ddev, 0);
  6476. rdev->pm.vblank_sync = true;
  6477. wake_up(&rdev->irq.vblank_queue);
  6478. }
  6479. if (atomic_read(&rdev->irq.pflip[0]))
  6480. radeon_crtc_handle_flip(rdev, 0);
  6481. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
  6482. DRM_DEBUG("IH: D1 vblank\n");
  6483. }
  6484. break;
  6485. case 1: /* D1 vline */
  6486. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
  6487. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
  6488. DRM_DEBUG("IH: D1 vline\n");
  6489. }
  6490. break;
  6491. default:
  6492. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6493. break;
  6494. }
  6495. break;
  6496. case 2: /* D2 vblank/vline */
  6497. switch (src_data) {
  6498. case 0: /* D2 vblank */
  6499. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
  6500. if (rdev->irq.crtc_vblank_int[1]) {
  6501. drm_handle_vblank(rdev->ddev, 1);
  6502. rdev->pm.vblank_sync = true;
  6503. wake_up(&rdev->irq.vblank_queue);
  6504. }
  6505. if (atomic_read(&rdev->irq.pflip[1]))
  6506. radeon_crtc_handle_flip(rdev, 1);
  6507. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
  6508. DRM_DEBUG("IH: D2 vblank\n");
  6509. }
  6510. break;
  6511. case 1: /* D2 vline */
  6512. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
  6513. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
  6514. DRM_DEBUG("IH: D2 vline\n");
  6515. }
  6516. break;
  6517. default:
  6518. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6519. break;
  6520. }
  6521. break;
  6522. case 3: /* D3 vblank/vline */
  6523. switch (src_data) {
  6524. case 0: /* D3 vblank */
  6525. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
  6526. if (rdev->irq.crtc_vblank_int[2]) {
  6527. drm_handle_vblank(rdev->ddev, 2);
  6528. rdev->pm.vblank_sync = true;
  6529. wake_up(&rdev->irq.vblank_queue);
  6530. }
  6531. if (atomic_read(&rdev->irq.pflip[2]))
  6532. radeon_crtc_handle_flip(rdev, 2);
  6533. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
  6534. DRM_DEBUG("IH: D3 vblank\n");
  6535. }
  6536. break;
  6537. case 1: /* D3 vline */
  6538. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
  6539. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
  6540. DRM_DEBUG("IH: D3 vline\n");
  6541. }
  6542. break;
  6543. default:
  6544. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6545. break;
  6546. }
  6547. break;
  6548. case 4: /* D4 vblank/vline */
  6549. switch (src_data) {
  6550. case 0: /* D4 vblank */
  6551. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
  6552. if (rdev->irq.crtc_vblank_int[3]) {
  6553. drm_handle_vblank(rdev->ddev, 3);
  6554. rdev->pm.vblank_sync = true;
  6555. wake_up(&rdev->irq.vblank_queue);
  6556. }
  6557. if (atomic_read(&rdev->irq.pflip[3]))
  6558. radeon_crtc_handle_flip(rdev, 3);
  6559. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
  6560. DRM_DEBUG("IH: D4 vblank\n");
  6561. }
  6562. break;
  6563. case 1: /* D4 vline */
  6564. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
  6565. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
  6566. DRM_DEBUG("IH: D4 vline\n");
  6567. }
  6568. break;
  6569. default:
  6570. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6571. break;
  6572. }
  6573. break;
  6574. case 5: /* D5 vblank/vline */
  6575. switch (src_data) {
  6576. case 0: /* D5 vblank */
  6577. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
  6578. if (rdev->irq.crtc_vblank_int[4]) {
  6579. drm_handle_vblank(rdev->ddev, 4);
  6580. rdev->pm.vblank_sync = true;
  6581. wake_up(&rdev->irq.vblank_queue);
  6582. }
  6583. if (atomic_read(&rdev->irq.pflip[4]))
  6584. radeon_crtc_handle_flip(rdev, 4);
  6585. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
  6586. DRM_DEBUG("IH: D5 vblank\n");
  6587. }
  6588. break;
  6589. case 1: /* D5 vline */
  6590. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
  6591. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
  6592. DRM_DEBUG("IH: D5 vline\n");
  6593. }
  6594. break;
  6595. default:
  6596. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6597. break;
  6598. }
  6599. break;
  6600. case 6: /* D6 vblank/vline */
  6601. switch (src_data) {
  6602. case 0: /* D6 vblank */
  6603. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
  6604. if (rdev->irq.crtc_vblank_int[5]) {
  6605. drm_handle_vblank(rdev->ddev, 5);
  6606. rdev->pm.vblank_sync = true;
  6607. wake_up(&rdev->irq.vblank_queue);
  6608. }
  6609. if (atomic_read(&rdev->irq.pflip[5]))
  6610. radeon_crtc_handle_flip(rdev, 5);
  6611. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
  6612. DRM_DEBUG("IH: D6 vblank\n");
  6613. }
  6614. break;
  6615. case 1: /* D6 vline */
  6616. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
  6617. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
  6618. DRM_DEBUG("IH: D6 vline\n");
  6619. }
  6620. break;
  6621. default:
  6622. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6623. break;
  6624. }
  6625. break;
  6626. case 42: /* HPD hotplug */
  6627. switch (src_data) {
  6628. case 0:
  6629. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
  6630. rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
  6631. queue_hotplug = true;
  6632. DRM_DEBUG("IH: HPD1\n");
  6633. }
  6634. break;
  6635. case 1:
  6636. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
  6637. rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
  6638. queue_hotplug = true;
  6639. DRM_DEBUG("IH: HPD2\n");
  6640. }
  6641. break;
  6642. case 2:
  6643. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
  6644. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
  6645. queue_hotplug = true;
  6646. DRM_DEBUG("IH: HPD3\n");
  6647. }
  6648. break;
  6649. case 3:
  6650. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
  6651. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
  6652. queue_hotplug = true;
  6653. DRM_DEBUG("IH: HPD4\n");
  6654. }
  6655. break;
  6656. case 4:
  6657. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
  6658. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
  6659. queue_hotplug = true;
  6660. DRM_DEBUG("IH: HPD5\n");
  6661. }
  6662. break;
  6663. case 5:
  6664. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
  6665. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
  6666. queue_hotplug = true;
  6667. DRM_DEBUG("IH: HPD6\n");
  6668. }
  6669. break;
  6670. default:
  6671. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6672. break;
  6673. }
  6674. break;
  6675. case 124: /* UVD */
  6676. DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
  6677. radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
  6678. break;
  6679. case 146:
  6680. case 147:
  6681. addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
  6682. status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
  6683. mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
  6684. dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
  6685. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  6686. addr);
  6687. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  6688. status);
  6689. cik_vm_decode_fault(rdev, status, addr, mc_client);
  6690. /* reset addr and status */
  6691. WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
  6692. break;
  6693. case 176: /* GFX RB CP_INT */
  6694. case 177: /* GFX IB CP_INT */
  6695. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6696. break;
  6697. case 181: /* CP EOP event */
  6698. DRM_DEBUG("IH: CP EOP\n");
  6699. /* XXX check the bitfield order! */
  6700. me_id = (ring_id & 0x60) >> 5;
  6701. pipe_id = (ring_id & 0x18) >> 3;
  6702. queue_id = (ring_id & 0x7) >> 0;
  6703. switch (me_id) {
  6704. case 0:
  6705. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6706. break;
  6707. case 1:
  6708. case 2:
6709. if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
6710. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6711. if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
6712. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  6713. break;
  6714. }
  6715. break;
  6716. case 184: /* CP Privileged reg access */
  6717. DRM_ERROR("Illegal register access in command stream\n");
  6718. /* XXX check the bitfield order! */
  6719. me_id = (ring_id & 0x60) >> 5;
  6720. pipe_id = (ring_id & 0x18) >> 3;
  6721. queue_id = (ring_id & 0x7) >> 0;
  6722. switch (me_id) {
  6723. case 0:
  6724. /* This results in a full GPU reset, but all we need to do is soft
  6725. * reset the CP for gfx
  6726. */
  6727. queue_reset = true;
  6728. break;
  6729. case 1:
  6730. /* XXX compute */
  6731. queue_reset = true;
  6732. break;
  6733. case 2:
  6734. /* XXX compute */
  6735. queue_reset = true;
  6736. break;
  6737. }
  6738. break;
  6739. case 185: /* CP Privileged inst */
  6740. DRM_ERROR("Illegal instruction in command stream\n");
  6741. /* XXX check the bitfield order! */
  6742. me_id = (ring_id & 0x60) >> 5;
  6743. pipe_id = (ring_id & 0x18) >> 3;
  6744. queue_id = (ring_id & 0x7) >> 0;
  6745. switch (me_id) {
  6746. case 0:
  6747. /* This results in a full GPU reset, but all we need to do is soft
  6748. * reset the CP for gfx
  6749. */
  6750. queue_reset = true;
  6751. break;
  6752. case 1:
  6753. /* XXX compute */
  6754. queue_reset = true;
  6755. break;
  6756. case 2:
  6757. /* XXX compute */
  6758. queue_reset = true;
  6759. break;
  6760. }
  6761. break;
  6762. case 224: /* SDMA trap event */
  6763. /* XXX check the bitfield order! */
  6764. me_id = (ring_id & 0x3) >> 0;
  6765. queue_id = (ring_id & 0xc) >> 2;
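/* per the masks above: bits [1:0] select the SDMA engine and bits [3:2]
 * the queue within it; only queue 0 (the gfx queue) is handled below */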
  6766. DRM_DEBUG("IH: SDMA trap\n");
  6767. switch (me_id) {
  6768. case 0:
  6769. switch (queue_id) {
  6770. case 0:
  6771. radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
  6772. break;
  6773. case 1:
  6774. /* XXX compute */
  6775. break;
  6776. case 2:
  6777. /* XXX compute */
  6778. break;
  6779. }
  6780. break;
  6781. case 1:
  6782. switch (queue_id) {
  6783. case 0:
  6784. radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  6785. break;
  6786. case 1:
  6787. /* XXX compute */
  6788. break;
  6789. case 2:
  6790. /* XXX compute */
  6791. break;
  6792. }
  6793. break;
  6794. }
  6795. break;
  6796. case 230: /* thermal low to high */
  6797. DRM_DEBUG("IH: thermal low to high\n");
  6798. rdev->pm.dpm.thermal.high_to_low = false;
  6799. queue_thermal = true;
  6800. break;
  6801. case 231: /* thermal high to low */
  6802. DRM_DEBUG("IH: thermal high to low\n");
  6803. rdev->pm.dpm.thermal.high_to_low = true;
  6804. queue_thermal = true;
  6805. break;
  6806. case 233: /* GUI IDLE */
  6807. DRM_DEBUG("IH: GUI idle\n");
  6808. break;
  6809. case 241: /* SDMA Privileged inst */
  6810. case 247: /* SDMA Privileged inst */
  6811. DRM_ERROR("Illegal instruction in SDMA command stream\n");
  6812. /* XXX check the bitfield order! */
  6813. me_id = (ring_id & 0x3) >> 0;
  6814. queue_id = (ring_id & 0xc) >> 2;
  6815. switch (me_id) {
  6816. case 0:
  6817. switch (queue_id) {
  6818. case 0:
  6819. queue_reset = true;
  6820. break;
  6821. case 1:
  6822. /* XXX compute */
  6823. queue_reset = true;
  6824. break;
  6825. case 2:
  6826. /* XXX compute */
  6827. queue_reset = true;
  6828. break;
  6829. }
  6830. break;
  6831. case 1:
  6832. switch (queue_id) {
  6833. case 0:
  6834. queue_reset = true;
  6835. break;
  6836. case 1:
  6837. /* XXX compute */
  6838. queue_reset = true;
  6839. break;
  6840. case 2:
  6841. /* XXX compute */
  6842. queue_reset = true;
  6843. break;
  6844. }
  6845. break;
  6846. }
  6847. break;
  6848. default:
  6849. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6850. break;
  6851. }
  6852. /* wptr/rptr are in bytes! */
  6853. rptr += 16;
  6854. rptr &= rdev->ih.ptr_mask;
  6855. }
  6856. if (queue_hotplug)
  6857. schedule_work(&rdev->hotplug_work);
  6858. if (queue_reset)
  6859. schedule_work(&rdev->reset_work);
  6860. if (queue_thermal)
  6861. schedule_work(&rdev->pm.dpm.thermal.work);
  6862. rdev->ih.rptr = rptr;
  6863. WREG32(IH_RB_RPTR, rdev->ih.rptr);
  6864. atomic_set(&rdev->ih.lock, 0);
  6865. /* make sure wptr hasn't changed while processing */
  6866. wptr = cik_get_ih_wptr(rdev);
  6867. if (wptr != rptr)
  6868. goto restart_ih;
  6869. return IRQ_HANDLED;
  6870. }
  6871. /*
  6872. * startup/shutdown callbacks
  6873. */
  6874. /**
  6875. * cik_startup - program the asic to a functional state
  6876. *
  6877. * @rdev: radeon_device pointer
  6878. *
  6879. * Programs the asic to a functional state (CIK).
  6880. * Called by cik_init() and cik_resume().
  6881. * Returns 0 for success, error for failure.
  6882. */
  6883. static int cik_startup(struct radeon_device *rdev)
  6884. {
  6885. struct radeon_ring *ring;
  6886. int r;
  6887. /* enable pcie gen2/3 link */
  6888. cik_pcie_gen3_enable(rdev);
  6889. /* enable aspm */
  6890. cik_program_aspm(rdev);
  6891. /* scratch needs to be initialized before MC */
  6892. r = r600_vram_scratch_init(rdev);
  6893. if (r)
  6894. return r;
  6895. cik_mc_program(rdev);
  6896. if (rdev->flags & RADEON_IS_IGP) {
  6897. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  6898. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
  6899. r = cik_init_microcode(rdev);
  6900. if (r) {
  6901. DRM_ERROR("Failed to load firmware!\n");
  6902. return r;
  6903. }
  6904. }
  6905. } else {
  6906. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  6907. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
  6908. !rdev->mc_fw) {
  6909. r = cik_init_microcode(rdev);
  6910. if (r) {
  6911. DRM_ERROR("Failed to load firmware!\n");
  6912. return r;
  6913. }
  6914. }
  6915. r = ci_mc_load_microcode(rdev);
  6916. if (r) {
  6917. DRM_ERROR("Failed to load MC firmware!\n");
  6918. return r;
  6919. }
  6920. }
  6921. r = cik_pcie_gart_enable(rdev);
  6922. if (r)
  6923. return r;
  6924. cik_gpu_init(rdev);
  6925. /* allocate rlc buffers */
  6926. if (rdev->flags & RADEON_IS_IGP) {
  6927. if (rdev->family == CHIP_KAVERI) {
  6928. rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
  6929. rdev->rlc.reg_list_size =
  6930. (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
  6931. } else {
  6932. rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
  6933. rdev->rlc.reg_list_size =
  6934. (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
  6935. }
  6936. }
  6937. rdev->rlc.cs_data = ci_cs_data;
  6938. rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
  6939. r = sumo_rlc_init(rdev);
  6940. if (r) {
  6941. DRM_ERROR("Failed to init rlc BOs!\n");
  6942. return r;
  6943. }
  6944. /* allocate wb buffer */
  6945. r = radeon_wb_init(rdev);
  6946. if (r)
  6947. return r;
  6948. /* allocate mec buffers */
  6949. r = cik_mec_init(rdev);
  6950. if (r) {
  6951. DRM_ERROR("Failed to init MEC BOs!\n");
  6952. return r;
  6953. }
  6954. r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
  6955. if (r) {
  6956. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6957. return r;
  6958. }
  6959. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
  6960. if (r) {
  6961. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6962. return r;
  6963. }
  6964. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  6965. if (r) {
  6966. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  6967. return r;
  6968. }
  6969. r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
  6970. if (r) {
  6971. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  6972. return r;
  6973. }
  6974. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  6975. if (r) {
  6976. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  6977. return r;
  6978. }
  6979. r = radeon_uvd_resume(rdev);
  6980. if (!r) {
  6981. r = uvd_v4_2_resume(rdev);
  6982. if (!r) {
  6983. r = radeon_fence_driver_start_ring(rdev,
  6984. R600_RING_TYPE_UVD_INDEX);
  6985. if (r)
  6986. dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
  6987. }
  6988. }
  6989. if (r)
  6990. rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
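/* zeroing the ring size turns the UVD ring setup further down into a
 * no-op, so a UVD init failure does not abort the rest of startup */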
  6991. /* Enable IRQ */
  6992. if (!rdev->irq.installed) {
  6993. r = radeon_irq_kms_init(rdev);
  6994. if (r)
  6995. return r;
  6996. }
  6997. r = cik_irq_init(rdev);
  6998. if (r) {
  6999. DRM_ERROR("radeon: IH init failed (%d).\n", r);
  7000. radeon_irq_kms_fini(rdev);
  7001. return r;
  7002. }
  7003. cik_irq_set(rdev);
  7004. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  7005. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
  7006. CP_RB0_RPTR, CP_RB0_WPTR,
  7007. PACKET3(PACKET3_NOP, 0x3FFF));
  7008. if (r)
  7009. return r;
  7010. /* set up the compute queues */
  7011. /* type-2 packets are deprecated on MEC, use type-3 instead */
  7012. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  7013. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
  7014. CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
  7015. PACKET3(PACKET3_NOP, 0x3FFF));
  7016. if (r)
  7017. return r;
  7018. ring->me = 1; /* first MEC */
  7019. ring->pipe = 0; /* first pipe */
  7020. ring->queue = 0; /* first queue */
  7021. ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
  7022. /* type-2 packets are deprecated on MEC, use type-3 instead */
  7023. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  7024. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
  7025. CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
  7026. PACKET3(PACKET3_NOP, 0x3FFF));
  7027. if (r)
  7028. return r;
7029. /* dGPUs only have 1 MEC */
  7030. ring->me = 1; /* first MEC */
  7031. ring->pipe = 0; /* first pipe */
  7032. ring->queue = 1; /* second queue */
  7033. ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
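/* note: both compute rings target MEC 1, pipe 0 and differ only in the
 * hardware queue and write-back wptr offset they use */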
  7034. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  7035. r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
  7036. SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
  7037. SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
  7038. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  7039. if (r)
  7040. return r;
  7041. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  7042. r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
  7043. SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
  7044. SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
  7045. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  7046. if (r)
  7047. return r;
  7048. r = cik_cp_resume(rdev);
  7049. if (r)
  7050. return r;
  7051. r = cik_sdma_resume(rdev);
  7052. if (r)
  7053. return r;
  7054. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  7055. if (ring->ring_size) {
  7056. r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
  7057. UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
  7058. RADEON_CP_PACKET2);
  7059. if (!r)
  7060. r = uvd_v1_0_init(rdev);
  7061. if (r)
  7062. DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
  7063. }
  7064. r = radeon_ib_pool_init(rdev);
  7065. if (r) {
  7066. dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
  7067. return r;
  7068. }
  7069. r = radeon_vm_manager_init(rdev);
  7070. if (r) {
  7071. dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
  7072. return r;
  7073. }
  7074. r = dce6_audio_init(rdev);
  7075. if (r)
  7076. return r;
  7077. return 0;
  7078. }
  7079. /**
  7080. * cik_resume - resume the asic to a functional state
  7081. *
  7082. * @rdev: radeon_device pointer
  7083. *
  7084. * Programs the asic to a functional state (CIK).
  7085. * Called at resume.
  7086. * Returns 0 for success, error for failure.
  7087. */
  7088. int cik_resume(struct radeon_device *rdev)
  7089. {
  7090. int r;
  7091. /* post card */
  7092. atom_asic_init(rdev->mode_info.atom_context);
  7093. /* init golden registers */
  7094. cik_init_golden_registers(rdev);
  7095. rdev->accel_working = true;
  7096. r = cik_startup(rdev);
  7097. if (r) {
  7098. DRM_ERROR("cik startup failed on resume\n");
  7099. rdev->accel_working = false;
  7100. return r;
  7101. }
  7102. return r;
  7103. }
  7104. /**
  7105. * cik_suspend - suspend the asic
  7106. *
  7107. * @rdev: radeon_device pointer
  7108. *
  7109. * Bring the chip into a state suitable for suspend (CIK).
  7110. * Called at suspend.
  7111. * Returns 0 for success.
  7112. */
  7113. int cik_suspend(struct radeon_device *rdev)
  7114. {
  7115. dce6_audio_fini(rdev);
  7116. radeon_vm_manager_fini(rdev);
  7117. cik_cp_enable(rdev, false);
  7118. cik_sdma_enable(rdev, false);
  7119. uvd_v1_0_fini(rdev);
  7120. radeon_uvd_suspend(rdev);
  7121. cik_fini_pg(rdev);
  7122. cik_fini_cg(rdev);
  7123. cik_irq_suspend(rdev);
  7124. radeon_wb_disable(rdev);
  7125. cik_pcie_gart_disable(rdev);
  7126. return 0;
  7127. }
7128. /* The plan is to move initialization into this function and use
7129. * helper functions so that radeon_device_init pretty much
7130. * does nothing more than call asic specific functions. This
7131. * should also allow removing a bunch of callback functions
7132. * like vram_info.
7133. */
  7134. /**
  7135. * cik_init - asic specific driver and hw init
  7136. *
  7137. * @rdev: radeon_device pointer
  7138. *
  7139. * Setup asic specific driver variables and program the hw
  7140. * to a functional state (CIK).
  7141. * Called at driver startup.
  7142. * Returns 0 for success, errors for failure.
  7143. */
  7144. int cik_init(struct radeon_device *rdev)
  7145. {
  7146. struct radeon_ring *ring;
  7147. int r;
  7148. /* Read BIOS */
  7149. if (!radeon_get_bios(rdev)) {
  7150. if (ASIC_IS_AVIVO(rdev))
  7151. return -EINVAL;
  7152. }
  7153. /* Must be an ATOMBIOS */
  7154. if (!rdev->is_atom_bios) {
7155. dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
  7156. return -EINVAL;
  7157. }
  7158. r = radeon_atombios_init(rdev);
  7159. if (r)
  7160. return r;
  7161. /* Post card if necessary */
  7162. if (!radeon_card_posted(rdev)) {
  7163. if (!rdev->bios) {
  7164. dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
  7165. return -EINVAL;
  7166. }
  7167. DRM_INFO("GPU not posted. posting now...\n");
  7168. atom_asic_init(rdev->mode_info.atom_context);
  7169. }
  7170. /* init golden registers */
  7171. cik_init_golden_registers(rdev);
  7172. /* Initialize scratch registers */
  7173. cik_scratch_init(rdev);
  7174. /* Initialize surface registers */
  7175. radeon_surface_init(rdev);
  7176. /* Initialize clocks */
  7177. radeon_get_clock_info(rdev->ddev);
  7178. /* Fence driver */
  7179. r = radeon_fence_driver_init(rdev);
  7180. if (r)
  7181. return r;
  7182. /* initialize memory controller */
  7183. r = cik_mc_init(rdev);
  7184. if (r)
  7185. return r;
  7186. /* Memory manager */
  7187. r = radeon_bo_init(rdev);
  7188. if (r)
  7189. return r;
  7190. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  7191. ring->ring_obj = NULL;
  7192. r600_ring_init(rdev, ring, 1024 * 1024);
  7193. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  7194. ring->ring_obj = NULL;
  7195. r600_ring_init(rdev, ring, 1024 * 1024);
  7196. r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
  7197. if (r)
  7198. return r;
  7199. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  7200. ring->ring_obj = NULL;
  7201. r600_ring_init(rdev, ring, 1024 * 1024);
  7202. r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
  7203. if (r)
  7204. return r;
  7205. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  7206. ring->ring_obj = NULL;
  7207. r600_ring_init(rdev, ring, 256 * 1024);
  7208. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  7209. ring->ring_obj = NULL;
  7210. r600_ring_init(rdev, ring, 256 * 1024);
  7211. r = radeon_uvd_init(rdev);
  7212. if (!r) {
  7213. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  7214. ring->ring_obj = NULL;
  7215. r600_ring_init(rdev, ring, 4096);
  7216. }
  7217. rdev->ih.ring_obj = NULL;
  7218. r600_ih_ring_init(rdev, 64 * 1024);
  7219. r = r600_pcie_gart_init(rdev);
  7220. if (r)
  7221. return r;
  7222. rdev->accel_working = true;
  7223. r = cik_startup(rdev);
  7224. if (r) {
  7225. dev_err(rdev->dev, "disabling GPU acceleration\n");
  7226. cik_cp_fini(rdev);
  7227. cik_sdma_fini(rdev);
  7228. cik_irq_fini(rdev);
  7229. sumo_rlc_fini(rdev);
  7230. cik_mec_fini(rdev);
  7231. radeon_wb_fini(rdev);
  7232. radeon_ib_pool_fini(rdev);
  7233. radeon_vm_manager_fini(rdev);
  7234. radeon_irq_kms_fini(rdev);
  7235. cik_pcie_gart_fini(rdev);
  7236. rdev->accel_working = false;
  7237. }
  7238. /* Don't start up if the MC ucode is missing.
  7239. * The default clocks and voltages before the MC ucode
7240. * is loaded are not sufficient for advanced operations.
  7241. */
  7242. if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
  7243. DRM_ERROR("radeon: MC ucode required for NI+.\n");
  7244. return -EINVAL;
  7245. }
  7246. return 0;
  7247. }
  7248. /**
  7249. * cik_fini - asic specific driver and hw fini
  7250. *
  7251. * @rdev: radeon_device pointer
  7252. *
  7253. * Tear down the asic specific driver variables and program the hw
  7254. * to an idle state (CIK).
  7255. * Called at driver unload.
  7256. */
  7257. void cik_fini(struct radeon_device *rdev)
  7258. {
  7259. cik_cp_fini(rdev);
  7260. cik_sdma_fini(rdev);
  7261. cik_fini_pg(rdev);
  7262. cik_fini_cg(rdev);
  7263. cik_irq_fini(rdev);
  7264. sumo_rlc_fini(rdev);
  7265. cik_mec_fini(rdev);
  7266. radeon_wb_fini(rdev);
  7267. radeon_vm_manager_fini(rdev);
  7268. radeon_ib_pool_fini(rdev);
  7269. radeon_irq_kms_fini(rdev);
  7270. uvd_v1_0_fini(rdev);
  7271. radeon_uvd_fini(rdev);
  7272. cik_pcie_gart_fini(rdev);
  7273. r600_vram_scratch_fini(rdev);
  7274. radeon_gem_fini(rdev);
  7275. radeon_fence_driver_fini(rdev);
  7276. radeon_bo_fini(rdev);
  7277. radeon_atombios_fini(rdev);
  7278. kfree(rdev->bios);
  7279. rdev->bios = NULL;
  7280. }
  7281. void dce8_program_fmt(struct drm_encoder *encoder)
  7282. {
  7283. struct drm_device *dev = encoder->dev;
  7284. struct radeon_device *rdev = dev->dev_private;
  7285. struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
  7286. struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
  7287. struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
  7288. int bpc = 0;
  7289. u32 tmp = 0;
  7290. enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
  7291. if (connector) {
  7292. struct radeon_connector *radeon_connector = to_radeon_connector(connector);
  7293. bpc = radeon_get_monitor_bpc(connector);
  7294. dither = radeon_connector->dither;
  7295. }
  7296. /* LVDS/eDP FMT is set up by atom */
  7297. if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
  7298. return;
  7299. /* not needed for analog */
  7300. if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
  7301. (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
  7302. return;
  7303. if (bpc == 0)
  7304. return;
  7305. switch (bpc) {
  7306. case 6:
  7307. if (dither == RADEON_FMT_DITHER_ENABLE)
  7308. /* XXX sort out optimal dither settings */
  7309. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7310. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
  7311. else
  7312. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
  7313. break;
  7314. case 8:
  7315. if (dither == RADEON_FMT_DITHER_ENABLE)
  7316. /* XXX sort out optimal dither settings */
  7317. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7318. FMT_RGB_RANDOM_ENABLE |
  7319. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
  7320. else
  7321. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
  7322. break;
  7323. case 10:
  7324. if (dither == RADEON_FMT_DITHER_ENABLE)
  7325. /* XXX sort out optimal dither settings */
  7326. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7327. FMT_RGB_RANDOM_ENABLE |
  7328. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
  7329. else
  7330. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
  7331. break;
  7332. default:
  7333. /* not needed */
  7334. break;
  7335. }
  7336. WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
  7337. }
  7338. /* display watermark setup */
  7339. /**
  7340. * dce8_line_buffer_adjust - Set up the line buffer
  7341. *
  7342. * @rdev: radeon_device pointer
  7343. * @radeon_crtc: the selected display controller
  7344. * @mode: the current display mode on the selected display
  7345. * controller
  7346. *
7347. * Set up the line buffer allocation for
  7348. * the selected display controller (CIK).
  7349. * Returns the line buffer size in pixels.
  7350. */
  7351. static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
  7352. struct radeon_crtc *radeon_crtc,
  7353. struct drm_display_mode *mode)
  7354. {
  7355. u32 tmp, buffer_alloc, i;
  7356. u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
  7357. /*
  7358. * Line Buffer Setup
7359. * There are 6 line buffers, one for each display controller.
7360. * There are 3 partitions per LB. Select the number of partitions
7361. * to enable based on the display width. For display widths larger
7362. * than 4096, you need to use 2 display controllers and combine
  7363. * them using the stereo blender.
  7364. */
  7365. if (radeon_crtc->base.enabled && mode) {
  7366. if (mode->crtc_hdisplay < 1920) {
  7367. tmp = 1;
  7368. buffer_alloc = 2;
  7369. } else if (mode->crtc_hdisplay < 2560) {
  7370. tmp = 2;
  7371. buffer_alloc = 2;
  7372. } else if (mode->crtc_hdisplay < 4096) {
  7373. tmp = 0;
  7374. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  7375. } else {
  7376. DRM_DEBUG_KMS("Mode too big for LB!\n");
  7377. tmp = 0;
  7378. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  7379. }
  7380. } else {
  7381. tmp = 1;
  7382. buffer_alloc = 0;
  7383. }
  7384. WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
  7385. LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
  7386. WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
  7387. DMIF_BUFFERS_ALLOCATED(buffer_alloc));
  7388. for (i = 0; i < rdev->usec_timeout; i++) {
  7389. if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
  7390. DMIF_BUFFERS_ALLOCATED_COMPLETED)
  7391. break;
  7392. udelay(1);
  7393. }
  7394. if (radeon_crtc->base.enabled && mode) {
  7395. switch (tmp) {
  7396. case 0:
  7397. default:
  7398. return 4096 * 2;
  7399. case 1:
  7400. return 1920 * 2;
  7401. case 2:
  7402. return 2560 * 2;
  7403. }
  7404. }
  7405. /* controller not enabled, so no lb used */
  7406. return 0;
  7407. }
  7408. /**
  7409. * cik_get_number_of_dram_channels - get the number of dram channels
  7410. *
  7411. * @rdev: radeon_device pointer
  7412. *
  7413. * Look up the number of video ram channels (CIK).
  7414. * Used for display watermark bandwidth calculations
  7415. * Returns the number of dram channels
  7416. */
  7417. static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
  7418. {
  7419. u32 tmp = RREG32(MC_SHARED_CHMAP);
  7420. switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
  7421. case 0:
  7422. default:
  7423. return 1;
  7424. case 1:
  7425. return 2;
  7426. case 2:
  7427. return 4;
  7428. case 3:
  7429. return 8;
  7430. case 4:
  7431. return 3;
  7432. case 5:
  7433. return 6;
  7434. case 6:
  7435. return 10;
  7436. case 7:
  7437. return 12;
  7438. case 8:
  7439. return 16;
  7440. }
  7441. }
  7442. struct dce8_wm_params {
  7443. u32 dram_channels; /* number of dram channels */
  7444. u32 yclk; /* bandwidth per dram data pin in kHz */
  7445. u32 sclk; /* engine clock in kHz */
  7446. u32 disp_clk; /* display clock in kHz */
  7447. u32 src_width; /* viewport width */
  7448. u32 active_time; /* active display time in ns */
  7449. u32 blank_time; /* blank time in ns */
  7450. bool interlaced; /* mode is interlaced */
  7451. fixed20_12 vsc; /* vertical scale ratio */
  7452. u32 num_heads; /* number of active crtcs */
  7453. u32 bytes_per_pixel; /* bytes per pixel display + overlay */
  7454. u32 lb_size; /* line buffer allocated to pipe */
  7455. u32 vtaps; /* vertical scaler taps */
  7456. };
  7457. /**
  7458. * dce8_dram_bandwidth - get the dram bandwidth
  7459. *
  7460. * @wm: watermark calculation data
  7461. *
  7462. * Calculate the raw dram bandwidth (CIK).
  7463. * Used for display watermark bandwidth calculations
  7464. * Returns the dram bandwidth in MBytes/s
  7465. */
  7466. static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
  7467. {
  7468. /* Calculate raw DRAM Bandwidth */
  7469. fixed20_12 dram_efficiency; /* 0.7 */
  7470. fixed20_12 yclk, dram_channels, bandwidth;
  7471. fixed20_12 a;
  7472. a.full = dfixed_const(1000);
  7473. yclk.full = dfixed_const(wm->yclk);
  7474. yclk.full = dfixed_div(yclk, a);
  7475. dram_channels.full = dfixed_const(wm->dram_channels * 4);
  7476. a.full = dfixed_const(10);
  7477. dram_efficiency.full = dfixed_const(7);
  7478. dram_efficiency.full = dfixed_div(dram_efficiency, a);
  7479. bandwidth.full = dfixed_mul(dram_channels, yclk);
  7480. bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
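/* worked sketch with hypothetical numbers: 2 DRAM channels at
 * yclk = 1000000 kHz gives 1000 * (2 * 4) * 0.7 = 5600 MBytes/s */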
  7481. return dfixed_trunc(bandwidth);
  7482. }
  7483. /**
  7484. * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
  7485. *
  7486. * @wm: watermark calculation data
  7487. *
  7488. * Calculate the dram bandwidth used for display (CIK).
  7489. * Used for display watermark bandwidth calculations
  7490. * Returns the dram bandwidth for display in MBytes/s
  7491. */
  7492. static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
  7493. {
  7494. /* Calculate DRAM Bandwidth and the part allocated to display. */
  7495. fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
  7496. fixed20_12 yclk, dram_channels, bandwidth;
  7497. fixed20_12 a;
  7498. a.full = dfixed_const(1000);
  7499. yclk.full = dfixed_const(wm->yclk);
  7500. yclk.full = dfixed_div(yclk, a);
  7501. dram_channels.full = dfixed_const(wm->dram_channels * 4);
  7502. a.full = dfixed_const(10);
7503. disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
  7504. disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
  7505. bandwidth.full = dfixed_mul(dram_channels, yclk);
  7506. bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
  7507. return dfixed_trunc(bandwidth);
  7508. }
  7509. /**
  7510. * dce8_data_return_bandwidth - get the data return bandwidth
  7511. *
  7512. * @wm: watermark calculation data
  7513. *
  7514. * Calculate the data return bandwidth used for display (CIK).
  7515. * Used for display watermark bandwidth calculations
  7516. * Returns the data return bandwidth in MBytes/s
  7517. */
  7518. static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
  7519. {
  7520. /* Calculate the display Data return Bandwidth */
  7521. fixed20_12 return_efficiency; /* 0.8 */
  7522. fixed20_12 sclk, bandwidth;
  7523. fixed20_12 a;
  7524. a.full = dfixed_const(1000);
  7525. sclk.full = dfixed_const(wm->sclk);
  7526. sclk.full = dfixed_div(sclk, a);
  7527. a.full = dfixed_const(10);
  7528. return_efficiency.full = dfixed_const(8);
  7529. return_efficiency.full = dfixed_div(return_efficiency, a);
  7530. a.full = dfixed_const(32);
  7531. bandwidth.full = dfixed_mul(a, sclk);
  7532. bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
  7533. return dfixed_trunc(bandwidth);
  7534. }
  7535. /**
  7536. * dce8_dmif_request_bandwidth - get the dmif bandwidth
  7537. *
  7538. * @wm: watermark calculation data
  7539. *
  7540. * Calculate the dmif bandwidth used for display (CIK).
  7541. * Used for display watermark bandwidth calculations
  7542. * Returns the dmif bandwidth in MBytes/s
  7543. */
  7544. static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
  7545. {
  7546. /* Calculate the DMIF Request Bandwidth */
  7547. fixed20_12 disp_clk_request_efficiency; /* 0.8 */
  7548. fixed20_12 disp_clk, bandwidth;
  7549. fixed20_12 a, b;
  7550. a.full = dfixed_const(1000);
  7551. disp_clk.full = dfixed_const(wm->disp_clk);
  7552. disp_clk.full = dfixed_div(disp_clk, a);
  7553. a.full = dfixed_const(32);
  7554. b.full = dfixed_mul(a, disp_clk);
  7555. a.full = dfixed_const(10);
  7556. disp_clk_request_efficiency.full = dfixed_const(8);
  7557. disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
  7558. bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
  7559. return dfixed_trunc(bandwidth);
  7560. }
  7561. /**
  7562. * dce8_available_bandwidth - get the min available bandwidth
  7563. *
  7564. * @wm: watermark calculation data
  7565. *
  7566. * Calculate the min available bandwidth used for display (CIK).
  7567. * Used for display watermark bandwidth calculations
  7568. * Returns the min available bandwidth in MBytes/s
  7569. */
  7570. static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
  7571. {
7572. /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
  7573. u32 dram_bandwidth = dce8_dram_bandwidth(wm);
  7574. u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
  7575. u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
  7576. return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
  7577. }
  7578. /**
  7579. * dce8_average_bandwidth - get the average available bandwidth
  7580. *
  7581. * @wm: watermark calculation data
  7582. *
  7583. * Calculate the average available bandwidth used for display (CIK).
  7584. * Used for display watermark bandwidth calculations
  7585. * Returns the average available bandwidth in MBytes/s
  7586. */
  7587. static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
  7588. {
  7589. /* Calculate the display mode Average Bandwidth
  7590. * DisplayMode should contain the source and destination dimensions,
  7591. * timing, etc.
  7592. */
  7593. fixed20_12 bpp;
  7594. fixed20_12 line_time;
  7595. fixed20_12 src_width;
  7596. fixed20_12 bandwidth;
  7597. fixed20_12 a;
  7598. a.full = dfixed_const(1000);
  7599. line_time.full = dfixed_const(wm->active_time + wm->blank_time);
  7600. line_time.full = dfixed_div(line_time, a);
  7601. bpp.full = dfixed_const(wm->bytes_per_pixel);
  7602. src_width.full = dfixed_const(wm->src_width);
  7603. bandwidth.full = dfixed_mul(src_width, bpp);
  7604. bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
  7605. bandwidth.full = dfixed_div(bandwidth, line_time);
  7606. return dfixed_trunc(bandwidth);
  7607. }
  7608. /**
  7609. * dce8_latency_watermark - get the latency watermark
  7610. *
  7611. * @wm: watermark calculation data
  7612. *
  7613. * Calculate the latency watermark (CIK).
  7614. * Used for display watermark bandwidth calculations
  7615. * Returns the latency watermark in ns
  7616. */
  7617. static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
  7618. {
  7619. /* First calculate the latency in ns */
  7620. u32 mc_latency = 2000; /* 2000 ns. */
  7621. u32 available_bandwidth = dce8_available_bandwidth(wm);
  7622. u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
  7623. u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
  7624. u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
  7625. u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
  7626. (wm->num_heads * cursor_line_pair_return_time);
  7627. u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
  7628. u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
  7629. u32 tmp, dmif_size = 12288;
  7630. fixed20_12 a, b, c;
  7631. if (wm->num_heads == 0)
  7632. return 0;
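/* approach: latency is the MC latency, plus the time the other heads
 * spend returning chunk and cursor data, plus the DC pipe latency; if
 * the line buffer cannot refill within the active time, the shortfall
 * is added on at the end of this function */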
  7633. a.full = dfixed_const(2);
  7634. b.full = dfixed_const(1);
  7635. if ((wm->vsc.full > a.full) ||
  7636. ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
  7637. (wm->vtaps >= 5) ||
  7638. ((wm->vsc.full >= a.full) && wm->interlaced))
  7639. max_src_lines_per_dst_line = 4;
  7640. else
  7641. max_src_lines_per_dst_line = 2;
  7642. a.full = dfixed_const(available_bandwidth);
  7643. b.full = dfixed_const(wm->num_heads);
  7644. a.full = dfixed_div(a, b);
  7645. b.full = dfixed_const(mc_latency + 512);
  7646. c.full = dfixed_const(wm->disp_clk);
  7647. b.full = dfixed_div(b, c);
  7648. c.full = dfixed_const(dmif_size);
  7649. b.full = dfixed_div(c, b);
  7650. tmp = min(dfixed_trunc(a), dfixed_trunc(b));
  7651. b.full = dfixed_const(1000);
  7652. c.full = dfixed_const(wm->disp_clk);
  7653. b.full = dfixed_div(c, b);
  7654. c.full = dfixed_const(wm->bytes_per_pixel);
  7655. b.full = dfixed_mul(b, c);
  7656. lb_fill_bw = min(tmp, dfixed_trunc(b));
  7657. a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
  7658. b.full = dfixed_const(1000);
  7659. c.full = dfixed_const(lb_fill_bw);
  7660. b.full = dfixed_div(c, b);
  7661. a.full = dfixed_div(a, b);
  7662. line_fill_time = dfixed_trunc(a);
  7663. if (line_fill_time < wm->active_time)
  7664. return latency;
  7665. else
  7666. return latency + (line_fill_time - wm->active_time);
  7667. }
  7668. /**
  7669. * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
  7670. * average and available dram bandwidth
  7671. *
  7672. * @wm: watermark calculation data
  7673. *
  7674. * Check if the display average bandwidth fits in the display
  7675. * dram bandwidth (CIK).
  7676. * Used for display watermark bandwidth calculations
  7677. * Returns true if the display fits, false if not.
  7678. */
  7679. static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
  7680. {
  7681. if (dce8_average_bandwidth(wm) <=
  7682. (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
  7683. return true;
  7684. else
  7685. return false;
  7686. }
  7687. /**
  7688. * dce8_average_bandwidth_vs_available_bandwidth - check
  7689. * average and available bandwidth
  7690. *
  7691. * @wm: watermark calculation data
  7692. *
  7693. * Check if the display average bandwidth fits in the display
  7694. * available bandwidth (CIK).
  7695. * Used for display watermark bandwidth calculations
  7696. * Returns true if the display fits, false if not.
  7697. */
  7698. static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
  7699. {
  7700. if (dce8_average_bandwidth(wm) <=
  7701. (dce8_available_bandwidth(wm) / wm->num_heads))
  7702. return true;
  7703. else
  7704. return false;
  7705. }
  7706. /**
  7707. * dce8_check_latency_hiding - check latency hiding
  7708. *
  7709. * @wm: watermark calculation data
  7710. *
  7711. * Check latency hiding (CIK).
  7712. * Used for display watermark bandwidth calculations
  7713. * Returns true if the display fits, false if not.
  7714. */
  7715. static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
  7716. {
  7717. u32 lb_partitions = wm->lb_size / wm->src_width;
  7718. u32 line_time = wm->active_time + wm->blank_time;
  7719. u32 latency_tolerant_lines;
  7720. u32 latency_hiding;
  7721. fixed20_12 a;
  7722. a.full = dfixed_const(1);
  7723. if (wm->vsc.full > a.full)
  7724. latency_tolerant_lines = 1;
  7725. else {
  7726. if (lb_partitions <= (wm->vtaps + 1))
  7727. latency_tolerant_lines = 1;
  7728. else
  7729. latency_tolerant_lines = 2;
  7730. }
  7731. latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
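/* latency_hiding is the time (in ns) the tolerated lines plus blanking
 * can absorb; the display fits if the latency watermark stays below it */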
  7732. if (dce8_latency_watermark(wm) <= latency_hiding)
  7733. return true;
  7734. else
  7735. return false;
  7736. }
  7737. /**
  7738. * dce8_program_watermarks - program display watermarks
  7739. *
  7740. * @rdev: radeon_device pointer
  7741. * @radeon_crtc: the selected display controller
  7742. * @lb_size: line buffer size
  7743. * @num_heads: number of display controllers in use
  7744. *
  7745. * Calculate and program the display watermarks for the
  7746. * selected display controller (CIK).
  7747. */
  7748. static void dce8_program_watermarks(struct radeon_device *rdev,
  7749. struct radeon_crtc *radeon_crtc,
  7750. u32 lb_size, u32 num_heads)
  7751. {
  7752. struct drm_display_mode *mode = &radeon_crtc->base.mode;
  7753. struct dce8_wm_params wm_low, wm_high;
  7754. u32 pixel_period;
  7755. u32 line_time = 0;
  7756. u32 latency_watermark_a = 0, latency_watermark_b = 0;
  7757. u32 tmp, wm_mask;
  7758. if (radeon_crtc->base.enabled && num_heads && mode) {
  7759. pixel_period = 1000000 / (u32)mode->clock;
  7760. line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
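/* pixel_period is in ns (mode->clock is in kHz), so line_time is the
 * horizontal total in ns, clamped to 65535, presumably to fit the
 * LATENCY_HIGH_WATERMARK field programmed below */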
  7761. /* watermark for high clocks */
  7762. if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
  7763. rdev->pm.dpm_enabled) {
  7764. wm_high.yclk =
  7765. radeon_dpm_get_mclk(rdev, false) * 10;
  7766. wm_high.sclk =
  7767. radeon_dpm_get_sclk(rdev, false) * 10;
  7768. } else {
  7769. wm_high.yclk = rdev->pm.current_mclk * 10;
  7770. wm_high.sclk = rdev->pm.current_sclk * 10;
  7771. }
  7772. wm_high.disp_clk = mode->clock;
  7773. wm_high.src_width = mode->crtc_hdisplay;
  7774. wm_high.active_time = mode->crtc_hdisplay * pixel_period;
  7775. wm_high.blank_time = line_time - wm_high.active_time;
  7776. wm_high.interlaced = false;
  7777. if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  7778. wm_high.interlaced = true;
  7779. wm_high.vsc = radeon_crtc->vsc;
  7780. wm_high.vtaps = 1;
  7781. if (radeon_crtc->rmx_type != RMX_OFF)
  7782. wm_high.vtaps = 2;
  7783. wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
  7784. wm_high.lb_size = lb_size;
  7785. wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
  7786. wm_high.num_heads = num_heads;
  7787. /* set for high clocks */
  7788. latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
  7789. /* possibly force display priority to high */
  7790. /* should really do this at mode validation time... */
  7791. if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
  7792. !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
  7793. !dce8_check_latency_hiding(&wm_high) ||
  7794. (rdev->disp_priority == 2)) {
  7795. DRM_DEBUG_KMS("force priority to high\n");
  7796. }
  7797. /* watermark for low clocks */
  7798. if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
  7799. rdev->pm.dpm_enabled) {
  7800. wm_low.yclk =
  7801. radeon_dpm_get_mclk(rdev, true) * 10;
  7802. wm_low.sclk =
  7803. radeon_dpm_get_sclk(rdev, true) * 10;
  7804. } else {
  7805. wm_low.yclk = rdev->pm.current_mclk * 10;
  7806. wm_low.sclk = rdev->pm.current_sclk * 10;
  7807. }
  7808. wm_low.disp_clk = mode->clock;
  7809. wm_low.src_width = mode->crtc_hdisplay;
  7810. wm_low.active_time = mode->crtc_hdisplay * pixel_period;
  7811. wm_low.blank_time = line_time - wm_low.active_time;
  7812. wm_low.interlaced = false;
  7813. if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  7814. wm_low.interlaced = true;
  7815. wm_low.vsc = radeon_crtc->vsc;
  7816. wm_low.vtaps = 1;
  7817. if (radeon_crtc->rmx_type != RMX_OFF)
  7818. wm_low.vtaps = 2;
  7819. wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
  7820. wm_low.lb_size = lb_size;
  7821. wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
  7822. wm_low.num_heads = num_heads;
  7823. /* set for low clocks */
  7824. latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
  7825. /* possibly force display priority to high */
  7826. /* should really do this at mode validation time... */
  7827. if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
  7828. !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
  7829. !dce8_check_latency_hiding(&wm_low) ||
  7830. (rdev->disp_priority == 2)) {
  7831. DRM_DEBUG_KMS("force priority to high\n");
  7832. }
  7833. }
  7834. /* select wm A */
  7835. wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
  7836. tmp = wm_mask;
  7837. tmp &= ~LATENCY_WATERMARK_MASK(3);
  7838. tmp |= LATENCY_WATERMARK_MASK(1);
  7839. WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
  7840. WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
  7841. (LATENCY_LOW_WATERMARK(latency_watermark_a) |
  7842. LATENCY_HIGH_WATERMARK(line_time)));
  7843. /* select wm B */
  7844. tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
  7845. tmp &= ~LATENCY_WATERMARK_MASK(3);
  7846. tmp |= LATENCY_WATERMARK_MASK(2);
  7847. WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
  7848. WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
  7849. (LATENCY_LOW_WATERMARK(latency_watermark_b) |
  7850. LATENCY_HIGH_WATERMARK(line_time)));
  7851. /* restore original selection */
  7852. WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
  7853. /* save values for DPM */
  7854. radeon_crtc->line_time = line_time;
  7855. radeon_crtc->wm_high = latency_watermark_a;
  7856. radeon_crtc->wm_low = latency_watermark_b;
  7857. }
  7858. /**
  7859. * dce8_bandwidth_update - program display watermarks
  7860. *
  7861. * @rdev: radeon_device pointer
  7862. *
  7863. * Calculate and program the display watermarks and line
  7864. * buffer allocation (CIK).
  7865. */
  7866. void dce8_bandwidth_update(struct radeon_device *rdev)
  7867. {
  7868. struct drm_display_mode *mode = NULL;
  7869. u32 num_heads = 0, lb_size;
  7870. int i;
  7871. radeon_update_display_priority(rdev);
  7872. for (i = 0; i < rdev->num_crtc; i++) {
  7873. if (rdev->mode_info.crtcs[i]->base.enabled)
  7874. num_heads++;
  7875. }
  7876. for (i = 0; i < rdev->num_crtc; i++) {
  7877. mode = &rdev->mode_info.crtcs[i]->base.mode;
  7878. lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
  7879. dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
  7880. }
  7881. }
  7882. /**
  7883. * cik_get_gpu_clock_counter - return GPU clock counter snapshot
  7884. *
  7885. * @rdev: radeon_device pointer
  7886. *
7887. * Fetches a GPU clock counter snapshot (CIK).
  7888. * Returns the 64 bit clock counter snapshot.
  7889. */
  7890. uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
  7891. {
  7892. uint64_t clock;
  7893. mutex_lock(&rdev->gpu_clock_mutex);
  7894. WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
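/* writing 1 latches the free-running counter so that the LSB/MSB reads
 * below form a consistent 64-bit snapshot */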
  7895. clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
  7896. ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
  7897. mutex_unlock(&rdev->gpu_clock_mutex);
  7898. return clock;
  7899. }
  7900. static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
  7901. u32 cntl_reg, u32 status_reg)
  7902. {
  7903. int r, i;
  7904. struct atom_clock_dividers dividers;
  7905. uint32_t tmp;
  7906. r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
  7907. clock, false, &dividers);
  7908. if (r)
  7909. return r;
  7910. tmp = RREG32_SMC(cntl_reg);
  7911. tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
  7912. tmp |= dividers.post_divider;
  7913. WREG32_SMC(cntl_reg, tmp);
  7914. for (i = 0; i < 100; i++) {
  7915. if (RREG32_SMC(status_reg) & DCLK_STATUS)
  7916. break;
  7917. mdelay(10);
  7918. }
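/* the loop above polls the status register for up to ~1 second
 * (100 * 10 ms) before giving up and returning -ETIMEDOUT */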
  7919. if (i == 100)
  7920. return -ETIMEDOUT;
  7921. return 0;
  7922. }
  7923. int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
  7924. {
  7925. int r = 0;
  7926. r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
  7927. if (r)
  7928. return r;
  7929. r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
  7930. return r;
  7931. }
  7932. static void cik_pcie_gen3_enable(struct radeon_device *rdev)
  7933. {
  7934. struct pci_dev *root = rdev->pdev->bus->self;
  7935. int bridge_pos, gpu_pos;
  7936. u32 speed_cntl, mask, current_data_rate;
  7937. int ret, i;
  7938. u16 tmp16;
  7939. if (radeon_pcie_gen2 == 0)
  7940. return;
  7941. if (rdev->flags & RADEON_IS_IGP)
  7942. return;
  7943. if (!(rdev->flags & RADEON_IS_PCIE))
  7944. return;
  7945. ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
  7946. if (ret != 0)
  7947. return;
  7948. if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
  7949. return;
  7950. speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
  7951. current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
  7952. LC_CURRENT_DATA_RATE_SHIFT;
  7953. if (mask & DRM_PCIE_SPEED_80) {
  7954. if (current_data_rate == 2) {
  7955. DRM_INFO("PCIE gen 3 link speeds already enabled\n");
  7956. return;
  7957. }
  7958. DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
  7959. } else if (mask & DRM_PCIE_SPEED_50) {
  7960. if (current_data_rate == 1) {
  7961. DRM_INFO("PCIE gen 2 link speeds already enabled\n");
  7962. return;
  7963. }
  7964. DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
  7965. }
  7966. bridge_pos = pci_pcie_cap(root);
  7967. if (!bridge_pos)
  7968. return;
  7969. gpu_pos = pci_pcie_cap(rdev->pdev);
  7970. if (!gpu_pos)
  7971. return;
  7972. if (mask & DRM_PCIE_SPEED_80) {
  7973. /* re-try equalization if gen3 is not already enabled */
  7974. if (current_data_rate != 2) {
  7975. u16 bridge_cfg, gpu_cfg;
  7976. u16 bridge_cfg2, gpu_cfg2;
  7977. u32 max_lw, current_lw, tmp;
  7978. pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
  7979. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
  7980. tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
  7981. pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
  7982. tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
  7983. pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
  7984. tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
  7985. max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
  7986. current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
  7987. if (current_lw < max_lw) {
  7988. tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
  7989. if (tmp & LC_RENEGOTIATION_SUPPORT) {
  7990. tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
  7991. tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
  7992. tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
  7993. WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
  7994. }
  7995. }
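/* equalization retry loop: each pass quiesces the link, requests a redo
 * of equalization, then restores the saved LNKCTL/LNKCTL2 fields on both
 * the bridge and the GPU */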
  7996. for (i = 0; i < 10; i++) {
  7997. /* check status */
  7998. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
  7999. if (tmp16 & PCI_EXP_DEVSTA_TRPND)
  8000. break;
  8001. pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
  8002. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
  8003. pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
  8004. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
  8005. tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
  8006. tmp |= LC_SET_QUIESCE;
  8007. WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
  8008. tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
  8009. tmp |= LC_REDO_EQ;
  8010. WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
  8011. mdelay(100);
  8012. /* linkctl */
  8013. pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
  8014. tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
  8015. tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
  8016. pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
  8017. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
  8018. tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
  8019. tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
  8020. pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
  8021. /* linkctl2 */
  8022. pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
  8023. tmp16 &= ~((1 << 4) | (7 << 9));
  8024. tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
  8025. pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
  8026. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
  8027. tmp16 &= ~((1 << 4) | (7 << 9));
  8028. tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
  8029. pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
  8030. tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
  8031. tmp &= ~LC_SET_QUIESCE;
  8032. WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
  8033. }
  8034. }
  8035. }
  8036. /* set the link speed */
  8037. speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
  8038. speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
  8039. WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
  8040. pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
  8041. tmp16 &= ~0xf;
  8042. if (mask & DRM_PCIE_SPEED_80)
  8043. tmp16 |= 3; /* gen3 */
  8044. else if (mask & DRM_PCIE_SPEED_50)
  8045. tmp16 |= 2; /* gen2 */
  8046. else
  8047. tmp16 |= 1; /* gen1 */
  8048. pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
  8049. speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
  8050. speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
  8051. WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
  8052. for (i = 0; i < rdev->usec_timeout; i++) {
  8053. speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
  8054. if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
  8055. break;
  8056. udelay(1);
  8057. }
  8058. }
  8059. static void cik_program_aspm(struct radeon_device *rdev)
  8060. {
  8061. u32 data, orig;
  8062. bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
  8063. bool disable_clkreq = false;
  8064. if (radeon_aspm == 0)
  8065. return;
  8066. /* XXX double check IGPs */
  8067. if (rdev->flags & RADEON_IS_IGP)
  8068. return;
  8069. if (!(rdev->flags & RADEON_IS_PCIE))
  8070. return;
  8071. orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
  8072. data &= ~LC_XMIT_N_FTS_MASK;
  8073. data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
  8074. if (orig != data)
  8075. WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
  8076. orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
  8077. data |= LC_GO_TO_RECOVERY;
  8078. if (orig != data)
  8079. WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
  8080. orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
  8081. data |= P_IGNORE_EDB_ERR;
  8082. if (orig != data)
  8083. WREG32_PCIE_PORT(PCIE_P_CNTL, data);
  8084. orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
  8085. data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
  8086. data |= LC_PMI_TO_L1_DIS;
  8087. if (!disable_l0s)
  8088. data |= LC_L0S_INACTIVITY(7);
  8089. if (!disable_l1) {
  8090. data |= LC_L1_INACTIVITY(7);
  8091. data &= ~LC_PMI_TO_L1_DIS;
  8092. if (orig != data)
  8093. WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
  8094. if (!disable_plloff_in_l1) {
  8095. bool clk_req_support;
  8096. orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
  8097. data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
  8098. data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
  8099. if (orig != data)
  8100. WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
  8101. orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
  8102. data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
  8103. data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
  8104. if (orig != data)
  8105. WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
  8106. orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
  8107. data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
  8108. data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
  8109. if (orig != data)
  8110. WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
  8111. orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
  8112. data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
  8113. data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
  8114. if (orig != data)
  8115. WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
  8116. orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
  8117. data &= ~LC_DYN_LANES_PWR_STATE_MASK;
  8118. data |= LC_DYN_LANES_PWR_STATE(3);
  8119. if (orig != data)
  8120. WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
  8121. if (!disable_clkreq) {
  8122. struct pci_dev *root = rdev->pdev->bus->self;
  8123. u32 lnkcap;
  8124. clk_req_support = false;
  8125. pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
  8126. if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
  8127. clk_req_support = true;
  8128. } else {
  8129. clk_req_support = false;
  8130. }
  8131. if (clk_req_support) {
  8132. orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
  8133. data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
  8134. if (orig != data)
  8135. WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
  8136. orig = data = RREG32_SMC(THM_CLK_CNTL);
  8137. data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
  8138. data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
  8139. if (orig != data)
  8140. WREG32_SMC(THM_CLK_CNTL, data);
  8141. orig = data = RREG32_SMC(MISC_CLK_CTRL);
  8142. data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
  8143. data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
  8144. if (orig != data)
  8145. WREG32_SMC(MISC_CLK_CTRL, data);
  8146. orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
  8147. data &= ~BCLK_AS_XCLK;
  8148. if (orig != data)
  8149. WREG32_SMC(CG_CLKPIN_CNTL, data);
  8150. orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
  8151. data &= ~FORCE_BIF_REFCLK_EN;
  8152. if (orig != data)
  8153. WREG32_SMC(CG_CLKPIN_CNTL_2, data);
  8154. orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
  8155. data &= ~MPLL_CLKOUT_SEL_MASK;
  8156. data |= MPLL_CLKOUT_SEL(4);
  8157. if (orig != data)
  8158. WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
  8159. }
  8160. }
  8161. } else {
  8162. if (orig != data)
  8163. WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
  8164. }
  8165. orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
  8166. data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
  8167. if (orig != data)
  8168. WREG32_PCIE_PORT(PCIE_CNTL2, data);
  8169. if (!disable_l0s) {
  8170. data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
8171. if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
  8172. data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
  8173. if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
  8174. orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
  8175. data &= ~LC_L0S_INACTIVITY_MASK;
  8176. if (orig != data)
  8177. WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
  8178. }
  8179. }
  8180. }
  8181. }