/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "nid.h"
#include "r600_dpm.h"
#include "ni_dpm.h"
#include "atom.h"
#include <linux/math64.h>
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0xC000
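
/*
 * CAC (capacitance * activity) weight tables, one per Cayman variant
 * (XT, Pro, LE).
 */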
static const struct ni_cac_weights cac_weights_cayman_xt =
{
	0x15,
	0x2,
	0x19,
	0x2,
	0x8,
	0x14,
	0x2,
	0x16,
	0xE,
	0x17,
	0x13,
	0x2B,
	0x10,
	0x7,
	0x5,
	0x5,
	0x5,
	0x2,
	0x3,
	0x9,
	0x10,
	0x10,
	0x2B,
	0xA,
	0x9,
	0x4,
	0xD,
	0xD,
	0x3E,
	0x18,
	0x14,
	0,
	0x3,
	0x3,
	0x5,
	0,
	0x2,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0x1CC,
	0,
	0x164,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x12,
	0x1F,
	132,
	5,
	7,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};

static const struct ni_cac_weights cac_weights_cayman_pro =
{
	0x16,
	0x4,
	0x10,
	0x2,
	0xA,
	0x16,
	0x2,
	0x18,
	0x10,
	0x1A,
	0x16,
	0x2D,
	0x12,
	0xA,
	0x6,
	0x6,
	0x6,
	0x2,
	0x4,
	0xB,
	0x11,
	0x11,
	0x2D,
	0xC,
	0xC,
	0x7,
	0x10,
	0x10,
	0x3F,
	0x1A,
	0x16,
	0,
	0x7,
	0x4,
	0x6,
	1,
	0x2,
	0x1,
	0,
	0,
	0,
	0,
	0,
	0,
	0x30,
	0,
	0x1CF,
	0,
	0x166,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x15,
	0x1F,
	132,
	6,
	6,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};

static const struct ni_cac_weights cac_weights_cayman_le =
{
	0x7,
	0xE,
	0x1,
	0xA,
	0x1,
	0x3F,
	0x2,
	0x18,
	0x10,
	0x1A,
	0x1,
	0x3F,
	0x1,
	0xE,
	0x6,
	0x6,
	0x6,
	0x2,
	0x4,
	0x9,
	0x1A,
	0x1A,
	0x2C,
	0xA,
	0x11,
	0x8,
	0x19,
	0x19,
	0x1,
	0x1,
	0x1A,
	0,
	0x8,
	0x5,
	0x8,
	0x1,
	0x3,
	0x1,
	0,
	0,
	0,
	0,
	0,
	0,
	0x38,
	0x38,
	0x239,
	0x3,
	0x18A,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x15,
	0x22,
	132,
	6,
	6,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};
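
/*
 * The clock gating sequences below are lists of
 * { register offset, value, mask } triplets, replayed by
 * btc_program_mgcg_hw_sequence().
 */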
#define NISLANDS_MGCG_SEQUENCE  300

static const u32 cayman_cgcg_cgls_default[] =
{
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};

#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH (sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32)))

static const u32 cayman_cgcg_cgls_disable[] =
{
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00000644, 0x000f7902, 0x001f4180,
	0x00000644, 0x000f3802, 0x001f4180
};

#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH (sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32)))

static const u32 cayman_cgcg_cgls_enable[] =
{
	0x00000644, 0x000f7882, 0x001f4080,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff
};

#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH (sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_default[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x00005448, 0x00000100, 0xffffffff,
	0x000055e4, 0x00000100, 0xffffffff,
	0x0000160c, 0x00000100, 0xffffffff,
	0x00008984, 0x06000100, 0xffffffff,
	0x0000c164, 0x00000100, 0xffffffff,
	0x00008a18, 0x00000100, 0xffffffff,
	0x0000897c, 0x06000100, 0xffffffff,
	0x00008b28, 0x00000100, 0xffffffff,
	0x00009144, 0x00800200, 0xffffffff,
	0x00009a60, 0x00000100, 0xffffffff,
	0x00009868, 0x00000100, 0xffffffff,
	0x00008d58, 0x00000100, 0xffffffff,
	0x00009510, 0x00000100, 0xffffffff,
	0x0000949c, 0x00000100, 0xffffffff,
	0x00009654, 0x00000100, 0xffffffff,
	0x00009030, 0x00000100, 0xffffffff,
	0x00009034, 0x00000100, 0xffffffff,
	0x00009038, 0x00000100, 0xffffffff,
	0x0000903c, 0x00000100, 0xffffffff,
	0x00009040, 0x00000100, 0xffffffff,
	0x0000a200, 0x00000100, 0xffffffff,
	0x0000a204, 0x00000100, 0xffffffff,
	0x0000a208, 0x00000100, 0xffffffff,
	0x0000a20c, 0x00000100, 0xffffffff,
	0x00009744, 0x00000100, 0xffffffff,
	0x00003f80, 0x00000100, 0xffffffff,
	0x0000a210, 0x00000100, 0xffffffff,
	0x0000a214, 0x00000100, 0xffffffff,
	0x000004d8, 0x00000100, 0xffffffff,
	0x00009664, 0x00000100, 0xffffffff,
	0x00009698, 0x00000100, 0xffffffff,
	0x000004d4, 0x00000200, 0xffffffff,
	0x000004d0, 0x00000000, 0xffffffff,
	0x000030cc, 0x00000104, 0xffffffff,
	0x0000d0c0, 0x00000100, 0xffffffff,
	0x0000d8c0, 0x00000100, 0xffffffff,
	0x0000802c, 0x40000000, 0xffffffff,
	0x00003fc4, 0x40000000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	0x0000802c, 0x40010000, 0xffffffff,
	0x00003fc4, 0x40010000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};

#define CAYMAN_MGCG_DEFAULT_LENGTH (sizeof(cayman_mgcg_default) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};

#define CAYMAN_MGCG_DISABLE_LENGTH (sizeof(cayman_mgcg_disable) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x96944200, 0xffffffff
};

#define CAYMAN_MGCG_ENABLE_LENGTH (sizeof(cayman_mgcg_enable) / (3 * sizeof(u32)))

#define NISLANDS_SYSLS_SEQUENCE  100

static const u32 cayman_sysls_default[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};

#define CAYMAN_SYSLS_DEFAULT_LENGTH (sizeof(cayman_sysls_default) / (3 * sizeof(u32)))

static const u32 cayman_sysls_disable[] =
{
	/* Register, Value, Mask bits */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00008dfc, 0x0000007f, 0xffffffff
};

#define CAYMAN_SYSLS_DISABLE_LENGTH (sizeof(cayman_sysls_disable) / (3 * sizeof(u32)))

static const u32 cayman_sysls_enable[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};

#define CAYMAN_SYSLS_ENABLE_LENGTH (sizeof(cayman_sysls_enable) / (3 * sizeof(u32)))

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);

struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
	struct ni_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

struct ni_ps *ni_get_ps(struct radeon_ps *rps)
{
	struct ni_ps *ps = rps->ps_priv;

	return ps;
}
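
/*
 * Leakage model: leakage = i_leakage * kt * kv * V, where
 * kt = at * e^(bt * T) and kv = av * e^(bv * V).  Inputs and
 * coefficients are scaled by 1000, and the math is done in 32.32
 * fixed point (drm_fixp).
 */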
static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t,
						     u32 ileakage,
						     u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;

	i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}

static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
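
/*
 * Clamp the requested state to the current AC/DC limits, keep clocks
 * and voltages non-decreasing across performance levels, and, when
 * more than one CRTC is active, disable mclk switching by forcing all
 * levels to the highest mclk.
 */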
static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	int i;

	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (!rdev->pm.dpm.ac_power) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->performance_levels[0].sclk,
				  &ps->performance_levels[0].mclk);

	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 1; i < ps->performance_level_count; i++)
		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
					  &ps->performance_levels[i].sclk,
					  &ps->performance_levels[i].mclk);

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   rdev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	}
}
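
/* The clockgating helpers below just replay the register sequences above. */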
static void ni_cg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_cgcg_cgls_default;
	count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
				      bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_cgcg_cgls_enable;
		count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_cgcg_cgls_disable;
		count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
	}
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_mg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_mgcg_default;
	count = CAYMAN_MGCG_DEFAULT_LENGTH;
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_mg_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_mgcg_enable;
		count = CAYMAN_MGCG_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_mgcg_disable;
		count = CAYMAN_MGCG_DISABLE_LENGTH;
	}
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_ls_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_sysls_default;
	count = CAYMAN_SYSLS_DEFAULT_LENGTH;
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_ls_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_sysls_enable;
		count = CAYMAN_SYSLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_sysls_disable;
		count = CAYMAN_SYSLS_DISABLE_LENGTH;
	}
	btc_program_mgcg_hw_sequence(rdev, ps, count);
}
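
/*
 * Dependency table entries carrying the leakage placeholder voltage
 * (0xff01) are patched with the board's real maximum vddc.
 */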
static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
							     struct radeon_clock_voltage_dependency_table *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++) {
			if (0xff01 == table->entries[i].v) {
				if (pi->max_vddc == 0)
					return -EINVAL;
				table->entries[i].v = pi->max_vddc;
			}
		}
	}
	return 0;
}

static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
	int ret;

	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	if (ret)
		return ret;

	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	return ret;
}

static void ni_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

#if 0
static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	if (ac_power)
		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif
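
/* The message parameter is handed to the SMC through SMC_SCRATCH0. */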
static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return rv770_send_msg_to_smc(rdev, msg);
}

static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
{
	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
{
	if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static void ni_stop_smc(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
		if (tmp != 1)
			break;
		udelay(1);
	}

	udelay(100);

	r7xx_stop_smc(rdev);
}
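
/*
 * Fetch the offsets of the state, soft register, MC register, fan,
 * arb and CAC/SPLL tables from the firmware header in SMC SRAM.
 */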
static int ni_process_firmware_header(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->state_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	eg_pi->mc_reg_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->fan_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->arb_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->cac_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->spll_table_start = (u16)tmp;

	return ret;
}

static void ni_read_clock_registers(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

#if 0
static int ni_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

	return 0;
}
#endif
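
/*
 * Convert the voltage/backbias response times and the ACPI/VBI
 * timeouts into reference clock based delay counts and store them
 * in the SMC soft registers.
 */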
static void ni_program_response_times(struct radeon_device *rdev)
{
	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
	u32 reference_clock;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;

	if (voltage_response_time == 0)
		voltage_response_time = 1000;

	if (backbias_response_time == 0)
		backbias_response_time = 1000;

	acpi_delay_time = 15000;
	vbi_time_out = 100000;

	reference_clock = radeon_get_xclk(rdev);

	vddc_dly = (voltage_response_time * reference_clock) / 1600;
	bb_dly = (backbias_response_time * reference_clock) / 1600;
	acpi_dly = (acpi_delay_time * reference_clock) / 1600;
	vbi_dly = (vbi_time_out * reference_clock) / 1600;

	mclk_switch_limit = (460 * reference_clock) / 100;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
}

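/* Copy one ATOM voltage table's SMIO settings into the SMC state table. */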
static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table *voltage_table,
					  NISLANDS_SMC_STATETABLE *table)
{
	unsigned int i;

	for (i = 0; i < voltage_table->count; i++) {
		table->highSMIO[i] = 0;
		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
	}
}

static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
					   NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	unsigned char i;

	if (eg_pi->vddc_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
				table->maxVDDCIndexInPPTable = i;
				break;
			}
		}
	}

	if (eg_pi->vddci_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);

		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
		/* use the VDDCI table's own mask; the original read the VDDC
		 * mask here, which looks like a copy/paste slip
		 */
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
	}
}

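/*
 * Select the lowest table entry that still satisfies the requested
 * voltage and encode it as an SMC voltage value (index + be16 value).
 */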
static int ni_populate_voltage_value(struct radeon_device *rdev,
				     struct atom_voltage_table *table,
				     u16 value,
				     NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (value <= table->entries[i].value) {
			voltage->index = (u8)i;
			voltage->value = cpu_to_be16(table->entries[i].value);
			break;
		}
	}

	if (i >= table->count)
		return -EINVAL;

	return 0;
}

static void ni_populate_mvdd_value(struct radeon_device *rdev,
				   u32 mclk,
				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!pi->mvdd_control) {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
		return;
	}

	if (mclk <= pi->mvdd_split_frequency) {
		voltage->index = eg_pi->mvdd_low_index;
		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
	} else {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
	}
}

static int ni_get_std_voltage_value(struct radeon_device *rdev,
				    NISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
	    ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
		*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
	else
		*std_voltage = be16_to_cpu(voltage->value);

	return 0;
}

static void ni_populate_std_voltage_value(struct radeon_device *rdev,
					  u16 value, u8 index,
					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);
}

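/*
 * Factor used to convert a power limit in watts into SMC units; derived
 * from the CAC TID count and an approximation of the xclk period.
 */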
static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	u32 xclk_period;
	u32 xclk = radeon_get_xclk(rdev);
	u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;

	xclk_period = (1000000000UL / xclk);
	xclk_period /= 10000UL;

	return tmp * xclk_period;
}

static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return (power_in_watts * scaling_factor) << 2;
}

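/*
 * DPM2 boost limit: the near-TDP limit scaled by the square of the ratio
 * of the medium to high standard VDDC (power scales roughly with V^2),
 * derated a further 10%.
 */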
static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
					  struct radeon_ps *radeon_state,
					  u32 near_tdp_limit)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 power_boost_limit = 0;
	int ret;

	if (ni_pi->enable_power_containment &&
	    ni_pi->use_power_boost_limit) {
		NISLANDS_SMC_VOLTAGE_VALUE vddc;
		u16 std_vddc_med;
		u16 std_vddc_high;
		u64 tmp, n, d;

		if (state->performance_level_count < 3)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 2].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
		if (ret)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 1].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
		if (ret)
			return 0;

		n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
		d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
		tmp = div64_u64(n, d);

		if (tmp >> 32)
			return 0;
		power_boost_limit = (u32)tmp;
	}

	return power_boost_limit;
}

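/*
 * Apply the user's TDP adjustment percentage to the base TDP limit and
 * shift the near-TDP limit by the same absolute amount.
 */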
static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
	}

	return 0;
}

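/*
 * Write the scaled TDP, near-TDP, safe-power and power-boost limits into
 * the dpm2Params section of the SMC state table in SRAM.
 */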
static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
				      struct radeon_ps *radeon_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	if (ni_pi->enable_power_containment) {
		NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
		u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		u32 power_boost_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));

		ret = ni_calculate_adjusted_tdp_limits(rdev,
						       false, /* ??? */
						       rdev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
								   near_tdp_limit);

		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor));
		smc_table->dpm2Params.PowerBoostLimit =
			cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));

		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
						    offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
					      (u8 *)(&smc_table->dpm2Params.TDPLimit),
					      sizeof(u32) * 4, pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

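/*
 * Copy the MC arbiter DRAM timing and burst-time settings from one arb
 * register set (F0-F3) to another, then request a switch to the
 * destination set.
 */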
int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

	return 0;
}

static int ni_init_arb_table_index(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24;

	return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
					  tmp, pi->sram_end);
}

static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp = (tmp >> 24) & 0xff;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}

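/*
 * Fill one MC arbiter register set with the refresh rate and the DRAM
 * timings ATOM derives for this performance level's sclk/mclk pair.
 */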
static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
						struct rv7xx_pl *pl,
						SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;

	arb_regs->mc_arb_rfsh_rate =
		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);

	radeon_atom_set_engine_dram_timings(rdev,
					    pl->sclk,
					    pl->mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);

	return 0;
}

static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
						  struct radeon_ps *radeon_state,
						  unsigned int first_arb_set)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int i, ret = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
		if (ret)
			break;

		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(ni_pi->arb_table_start +
						    offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
						    sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
					      (u8 *)&arb_regs,
					      (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
					      pi->sram_end);
		if (ret)
			break;
	}

	return ret;
}

static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
					       struct radeon_ps *radeon_new_state)
{
	return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
						      NISLANDS_DRIVER_STATE_ARB_INDEX);
}

static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
					   struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	voltage->index = eg_pi->mvdd_high_index;
	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
}

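/*
 * Build the initial (boot) entry of the SMC state table from the cached
 * clock registers and the boot state's first performance level.
 */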
static int ni_populate_smc_initial_state(struct radeon_device *rdev,
					 struct radeon_ps *radeon_initial_state,
					 NISLANDS_SMC_STATETABLE *table)
{
	struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;
	int ret;

	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(ni_pi->clock_registers.dll_cntl);
	table->initialState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
	table->initialState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
	table->initialState.levels[0].mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);
	table->initialState.levels[0].arbRefreshState =
		NISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.levels[0].ACIndex = 0;

	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.levels[0].vddc);
	if (!ret) {
		u16 std_vddc;

		ret = ni_get_std_voltage_value(rdev,
					       &table->initialState.levels[0].vddc,
					       &std_vddc);
		if (!ret)
			ni_populate_std_voltage_value(rdev, std_vddc,
						      table->initialState.levels[0].vddc.index,
						      &table->initialState.levels[0].std_vddc);
	}

	if (eg_pi->vddci_control)
		ni_populate_voltage_value(rdev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.levels[0].vddci);

	ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);

	reg = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(reg);

	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);

	if (pi->boot_in_gen2)
		table->initialState.levels[0].gen2PCIE = 1;
	else
		table->initialState.levels[0].gen2PCIE = 0;

	if (pi->mem_gddr5) {
		table->initialState.levels[0].strobeMode =
			cypress_get_strobe_mode_settings(rdev,
							 initial_state->performance_levels[0].mclk);

		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	table->initialState.levels[0].dpm2.MaxPS = 0;
	table->initialState.levels[0].dpm2.NearTDPDec = 0;
	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
	table->initialState.levels[0].dpm2.BelowSafeInc = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}

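/*
 * Build the ACPI state: start from the initial state, fall back to the
 * minimum voltages, reset/power down the memory clocks and DLLs, and
 * zero the sclk/mclk values.
 */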
static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
				      NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
	u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
	u32 reg;
	int ret;

	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		ret = ni_populate_voltage_value(rdev,
						&eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = ni_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				ni_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}

		if (pi->pcie_gen2) {
			if (pi->acpi_pcie_gen2)
				table->ACPIState.levels[0].gen2PCIE = 1;
			else
				table->ACPIState.levels[0].gen2PCIE = 0;
		} else {
			table->ACPIState.levels[0].gen2PCIE = 0;
		}
	} else {
		ret = ni_populate_voltage_value(rdev,
						&eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table,
						&table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = ni_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc,
						       &std_vddc);
			if (!ret)
				ni_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	if (eg_pi->acpi_vddci) {
		if (eg_pi->vddci_control)
			ni_populate_voltage_value(rdev,
						  &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	mpll_ad_func_cntl &= ~PDNB;

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	if (pi->mem_gddr5)
		mpll_dq_func_cntl &= ~PDNB;
	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;

	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
			      MRDCKA1_PDNB |
			      MRDCKB0_PDNB |
			      MRDCKB1_PDNB |
			      MRDCKC0_PDNB |
			      MRDCKC1_PDNB |
			      MRDCKD0_PDNB |
			      MRDCKD1_PDNB);

	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 1;

	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}

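/*
 * Construct the complete SMC state table (voltage masks, platform flags,
 * initial/ACPI/ULV states, arb timings) and upload it to SMC SRAM.
 */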
static int ni_init_smc_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;

	memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));

	ni_populate_smc_voltage_tables(rdev, table);

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	ret = ni_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	table->ULVState = table->initialState;

	ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
						     NISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
				       sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
}

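/*
 * Compute the SPLL register values (reference, post and feedback
 * dividers, plus optional spread spectrum) for a target engine clock.
 */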
static int ni_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    NISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* the fractional feedback divider is in 1/16384 (2^14) units; the
	 * original multiplied by 16834, an apparent digit transposition
	 */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}

static int ni_populate_sclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  NISLANDS_SMC_SCLK_VALUE *sclk)
{
	NISLANDS_SMC_SCLK_VALUE sclk_tmp;
	int ret;

	ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
	if (!ret) {
		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
	}

	return ret;
}

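/*
 * Pre-compute a 256-entry SPLL divider/spread-spectrum lookup table in
 * steps of 512 clock units and upload it to SMC SRAM.
 */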
static int ni_init_smc_spll_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
	NISLANDS_SMC_SCLK_VALUE sclk_params;
	u32 fb_div;
	u32 p_div;
	u32 clk_s;
	u32 clk_v;
	u32 sclk = 0;
	int i, ret;
	u32 tmp;

	if (ni_pi->spll_table_start == 0)
		return -EINVAL;

	spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
	if (spll_table == NULL)
		return -ENOMEM;

	for (i = 0; i < 256; i++) {
		ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
		if (ret)
			break;

		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;

		fb_div &= ~0x00001FFF;
		fb_div >>= 1;
		clk_v >>= 6;

		if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
			ret = -EINVAL;

		/* the original tested clk_s twice and never validated fb_div;
		 * validate fb_div here instead
		 */
		if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
			ret = -EINVAL;

		if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
			ret = -EINVAL;

		if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
			ret = -EINVAL;

		if (ret)
			break;

		tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
			((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
		spll_table->freq[i] = cpu_to_be32(tmp);

		tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
			((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
		spll_table->ss[i] = cpu_to_be32(tmp);

		sclk += 512;
	}

	if (!ret)
		ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
					      sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);

	kfree(spll_table);

	return ret;
}

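/*
 * Compute the MPLL AD/DQ register values, DLL speed and optional memory
 * spread spectrum for a target memory clock.
 */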
static int ni_populate_mclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  u32 memory_clock,
				  NISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
	u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;
	u32 mc_seq_misc7;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, strobe_mode, &dividers);
	if (ret)
		return ret;

	if (!strobe_mode) {
		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);

		if (mc_seq_misc7 & 0x8000000)
			dividers.post_div = 1;
	}

	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (strobe_mode)
			mpll_dq_func_cntl &= ~PDNB;
		else
			mpll_dq_func_cntl |= PDNB;

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = ss.percentage *
				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
	if (dll_state_on)
		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
				     MRDCKA1_PDNB |
				     MRDCKB0_PDNB |
				     MRDCKB1_PDNB |
				     MRDCKC0_PDNB |
				     MRDCKC1_PDNB |
				     MRDCKD0_PDNB |
				     MRDCKD1_PDNB);
	else
		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
				      MRDCKA1_PDNB |
				      MRDCKB0_PDNB |
				      MRDCKB1_PDNB |
				      MRDCKC0_PDNB |
				      MRDCKC1_PDNB |
				      MRDCKD0_PDNB |
				      MRDCKD1_PDNB);

	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}

static void ni_populate_smc_sp(struct radeon_device *rdev,
			       struct radeon_ps *radeon_state,
			       NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_ps *ps = ni_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int i;

	for (i = 0; i < ps->performance_level_count - 1; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[ps->performance_level_count - 1].bSP =
		cpu_to_be32(pi->psp);
}

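/*
 * Translate one driver performance level (sclk, mclk, voltages, PCIe
 * flags) into an SMC hardware performance level, including the memory
 * stutter/EDC/RTT flags.
 */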
static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
					 struct rv7xx_pl *pl,
					 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	u32 tmp = RREG32(DC_STUTTER_CNTL);

	level->gen2PCIE = pi->pcie_gen2 ?
		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;

	ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags = 0;
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (tmp & DC_STUTTER_ENABLE_A) &&
	    (tmp & DC_STUTTER_ENABLE_B))
		level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;

	if (pi->mem_gddr5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);

		if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
			if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
				level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
		}

		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
					     &level->mclk,
					     (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
					     dll_state_on);
	} else
		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);

	if (ret)
		return ret;

	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ni_populate_std_voltage_value(rdev, std_vddc,
				      level->vddc.index, &level->std_vddc);

	if (eg_pi->vddci_control) {
		ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}

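/*
 * Fill the per-level aT entries: for each pair of adjacent levels,
 * derive low/high switch times via r600_calculate_at() and encode them
 * as CG_R/CG_L thresholds scaled by the bsp/pbsp settings.
 */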
static int ni_populate_smc_t(struct radeon_device *rdev,
			     struct radeon_ps *radeon_state,
			     NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		if (eg_pi->uvd_enabled)
			ret = r600_calculate_at(
				1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);
		else
			ret = r600_calculate_at(
				1000 * (i + 1),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);

		if (ret) {
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;

		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}

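/*
 * Fill the per-level DPM2 power-containment parameters (MaxPS pulse
 * skipping, near-TDP step sizes, power-boost flag) for the new state.
 */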
static int ni_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	int i, ret;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 power_boost_limit;
	u8 max_ps_percent;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	ret = ni_calculate_adjusted_tdp_limits(rdev,
					       false, /* ??? */
					       rdev->pm.dpm.tdp_adjustment,
					       &tdp_limit,
					       &near_tdp_limit);
	if (ret)
		return ret;

	power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);

	ret = rv770_write_smc_sram_dword(rdev,
					 pi->state_table_start +
					 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
					 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
					 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
					 pi->sram_end);
	if (ret)
		power_boost_limit = 0;

	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		max_ps_percent = (i != (state->performance_level_count - 1)) ?
			NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;

		if (max_sclk < prev_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
			min_sclk = max_sclk;
		else if (1 == i)
			min_sclk = prev_sclk;
		else
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;

		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		smc_state->levels[i].dpm2.MaxPS =
			(u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].stateFlags |=
			((i != (state->performance_level_count - 1)) && power_boost_limit) ?
			PPSMC_STATEFLAG_POWERBOOST : 0;
	}

	return 0;
}

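/*
 * Program per-level SQ power throttling: enable ramping only when the
 * ramp constants fit their register fields and the level's sclk is at
 * or above the ramping threshold; otherwise saturate the masks.
 */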
static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 sq_power_throttle;
	u32 sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (rdev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
			sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
			sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
			sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
			sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
		} else {
			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}

static int ni_enable_power_containment(struct radeon_device *rdev,
				       struct radeon_ps *radeon_new_state,
				       bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (ni_pi->enable_power_containment) {
		if (enable) {
			if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
					ni_pi->pc_enabled = false;
				} else {
					ni_pi->pc_enabled = true;
				}
			}
		} else {
			smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
			if (smc_result != PPSMC_Result_OK)
				ret = -EINVAL;
			ni_pi->pc_enabled = false;
		}
	}

	return ret;
}

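/*
 * Convert a driver power state into a full SMC software state: per-level
 * conversion, display watermarks, arb/AC timing indices, then the DPM2
 * containment, SQ ramping and aT tables.
 */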
static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i, ret;
	u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100;

	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;

	if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
		return -EINVAL;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
				      cpu_to_be32(threshold / 512));

	ni_populate_smc_sp(rdev, radeon_state, smc_state);

	ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return ni_populate_smc_t(rdev, radeon_state, smc_state);
}

static int ni_upload_sw_state(struct radeon_device *rdev,
			      struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u16 address = pi->state_table_start +
		offsetof(NISLANDS_SMC_STATETABLE, driverState);
	u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
		((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
	int ret;
	NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);

	if (smc_state == NULL)
		return -ENOMEM;

	ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
	if (ret)
		goto done;

	ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);

done:
	kfree(smc_state);

	return ret;
}

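/*
 * Expand the MC register table: for registers that need companion writes
 * (EMRS/MRS/MRS1), append derived entries built from the current
 * hardware values.
 */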
static int ni_set_mc_special_registers(struct radeon_device *rdev,
				       struct ni_mc_reg_table *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		switch (table->mc_reg_address[i].s1) {
		case MC_SEQ_MISC1 >> 2:
			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			j++;

			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (!pi->mem_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			break;
		case MC_SEQ_RESERVE_M >> 2:
			/* bounds-check j before writing, as in the case above;
			 * the original only checked (with an off-by-one '>')
			 * after the increment
			 */
			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			j++;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}

static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch (in_reg) {
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
	case MC_SEQ_PMG_TIMING >> 2:
		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
		break;
	case MC_PMG_CMD_MRS2 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ni_set_valid_flag(struct ni_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++)
		table->mc_reg_address[i].s0 =
			ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
}

  2416. static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
  2417. struct ni_mc_reg_table *ni_table)
  2418. {
  2419. u8 i, j;
  2420. if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
  2421. return -EINVAL;
  2422. if (table->num_entries > MAX_AC_TIMING_ENTRIES)
  2423. return -EINVAL;
  2424. for (i = 0; i < table->last; i++)
  2425. ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
  2426. ni_table->last = table->last;
  2427. for (i = 0; i < table->num_entries; i++) {
  2428. ni_table->mc_reg_table_entry[i].mclk_max =
  2429. table->mc_reg_table_entry[i].mclk_max;
  2430. for (j = 0; j < table->last; j++)
  2431. ni_table->mc_reg_table_entry[i].mc_data[j] =
  2432. table->mc_reg_table_entry[i].mc_data[j];
  2433. }
  2434. ni_table->num_entries = table->num_entries;
  2435. return 0;
  2436. }
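
/*
 * Build the driver's MC register table: seed the _LP shadow registers
 * from the live MC state, copy the VBIOS AC timing table, patch in the
 * special registers, and flag the registers that vary across entries.
 */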
static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	struct atom_mc_reg_table *table;
	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ni_copy_vbios_mc_reg_table(table, ni_table);
	if (ret)
		goto init_mc_done;

	ni_set_s0_mc_reg_index(ni_table);

	ret = ni_set_mc_special_registers(rdev, ni_table);
	if (ret)
		goto init_mc_done;

	ni_set_valid_flag(ni_table);

init_mc_done:
	kfree(table);

	return ret;
}
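
/*
 * Copy the address pairs of all flagged MC registers into the SMC
 * table (big-endian), capped at SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE.
 */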
static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
					 SMC_NIslands_MCRegisters *mc_reg_table)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 i, j;

	for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) {
		if (ni_pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
				break;
			mc_reg_table->address[i].s0 =
				cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 =
				cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}
	mc_reg_table->last = (u8)i;
}

static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
				    SMC_NIslands_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 struct rv7xx_pl *pl,
						 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 i = 0;

	for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
		if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data,
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);
}

static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   struct radeon_ps *radeon_state,
					   SMC_NIslands_MCRegisters *mc_reg_table)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i;

	for (i = 0; i < state->performance_level_count; i++) {
		ni_convert_mc_reg_table_entry_to_smc(rdev,
						     &state->performance_levels[i],
						     &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
	}
}
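
/*
 * Populate the complete SMC MC register table for the boot state:
 * data[0] holds the boot level's register set, data[1] the first AC
 * timing entry (presumably the initial/low slot), and the driver state
 * slots hold one register set per performance level.
 */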
static int ni_populate_mc_reg_table(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);

	ni_populate_mc_reg_addresses(rdev, mc_reg_table);

	ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
					     &mc_reg_table->data[0]);

	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
				&mc_reg_table->data[1],
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);

	ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)mc_reg_table,
				       sizeof(SMC_NIslands_MCRegisters),
				       pi->sram_end);
}
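
/*
 * Upload only the driver state slots of the MC register table for the
 * new power state; the boot/initial slots are left untouched.
 */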
static int ni_upload_mc_reg_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
	u16 address;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);

	address = eg_pi->mc_reg_table_start +
		(u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);

	return rv770_copy_bytes_to_smc(rdev, address,
				       (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
				       sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
				       pi->sram_end);
}
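
/*
 * Build the voltage x temperature leakage lookup table from the leakage
 * coefficients. The temperature axis steps in 8 degree increments
 * (scaled by 1000, apparently millidegrees); unused voltage columns are
 * padded with the maximum leakage seen.
 */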
static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
						   PP_NIslands_CACTABLES *cac_tables)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 leakage = 0;
	unsigned int i, j, table_size;
	s32 t;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	table_size = eg_pi->vddc_voltage_table.count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
		for (j = 0; j < table_size; j++) {
			t = (1000 * ((i + 1) * 8));

			if (t < ni_pi->cac_data.leakage_minimum_temperature)
				t = ni_pi->cac_data.leakage_minimum_temperature;

			ni_calculate_leakage_for_v_and_t(rdev,
							 &ni_pi->cac_data.leakage_coefficients,
							 eg_pi->vddc_voltage_table.entries[j].value,
							 t,
							 ni_pi->cac_data.i_leakage,
							 &leakage);

			smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
			if (smc_leakage > max_leakage)
				max_leakage = smc_leakage;

			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
		}
	}

	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
	}
	return 0;
}

static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
					    PP_NIslands_CACTABLES *cac_tables)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_cac_leakage_table *leakage_table =
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i, j, table_size;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	if (!leakage_table)
		return -EINVAL;

	table_size = leakage_table->count;

	if (eg_pi->vddc_voltage_table.count != table_size)
		table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
			eg_pi->vddc_voltage_table.count : leakage_table->count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	if (table_size == 0)
		return -EINVAL;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (j = 0; j < table_size; j++) {
		smc_leakage = leakage_table->entries[j].leakage;

		if (smc_leakage > max_leakage)
			max_leakage = smc_leakage;

		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
	}

	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
	}
	return 0;
}
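
/*
 * Program the CAC control registers, build the leakage LUT (driver
 * calculated or taken from the VBIOS leakage table) and copy the CAC
 * tables to SMC RAM. On failure CAC and power containment are simply
 * disabled rather than failing DPM enable, hence the unconditional
 * return 0.
 */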
static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PP_NIslands_CACTABLES *cac_tables = NULL;
	int i, ret;
	u32 reg;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
	reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
		TID_UNIT(ni_pi->cac_weights->tid_unit));
	WREG32(CG_CAC_CTRL, reg);

	for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
		ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];

	for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
		cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];

	ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
	ni_pi->cac_data.pwr_const = 0;
	ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
	ni_pi->cac_data.bif_cac_value = 0;
	ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
	ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
	ni_pi->cac_data.allow_ovrflw = 0;
	ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
	ni_pi->cac_data.num_win_tdp = 0;
	ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;

	if (ni_pi->driver_calculate_cac_leakage)
		ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
	else
		ret = ni_init_simplified_leakage_table(rdev, cac_tables);
	if (ret)
		goto done_free;

	cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
	cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
	cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
	cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
	cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
	cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
	cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
	cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
	cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;

	ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
				      sizeof(PP_NIslands_CACTABLES), pi->sram_end);

done_free:
	if (ret) {
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return 0;
}
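
/*
 * Program the per-block CAC weight registers (TCP/TA/TCC/CB/DB/SX/SPI/
 * LDS/SC/BIF/CP/PA/VGT/DC/UVD/SQ) from the chip specific weight table.
 */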
static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;

	if (!ni_pi->enable_cac ||
	    !ni_pi->cac_configuration_required)
		return 0;

	if (ni_pi->cac_weights == NULL)
		return -EINVAL;

	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
						      WEIGHT_TCP_SIG1_MASK |
						      WEIGHT_TA_SIG_MASK);
	reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
		WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
		WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
						      WEIGHT_TCC_EN1_MASK |
						      WEIGHT_TCC_EN2_MASK);
	reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
		WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
		WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
						      WEIGHT_CB_EN1_MASK |
						      WEIGHT_CB_EN2_MASK |
						      WEIGHT_CB_EN3_MASK);
	reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
		WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
		WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
		WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
						      WEIGHT_DB_SIG1_MASK |
						      WEIGHT_DB_SIG2_MASK |
						      WEIGHT_DB_SIG3_MASK);
	reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
		WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
		WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
		WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
						      WEIGHT_SXM_SIG1_MASK |
						      WEIGHT_SXM_SIG2_MASK |
						      WEIGHT_SXS_SIG0_MASK |
						      WEIGHT_SXS_SIG1_MASK);
	reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
		WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
		WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
		WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
		WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);

	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
						      WEIGHT_XBR_1_MASK |
						      WEIGHT_XBR_2_MASK |
						      WEIGHT_SPI_SIG0_MASK);
	reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
		WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
		WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
		WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
						      WEIGHT_SPI_SIG2_MASK |
						      WEIGHT_SPI_SIG3_MASK |
						      WEIGHT_SPI_SIG4_MASK |
						      WEIGHT_SPI_SIG5_MASK);
	reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
		WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
		WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
		WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
		WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
						      WEIGHT_LDS_SIG1_MASK |
						      WEIGHT_SC_MASK);
	reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
		WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
		WEIGHT_SC(ni_pi->cac_weights->weight_sc));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
						      WEIGHT_CP_MASK |
						      WEIGHT_PA_SIG0_MASK |
						      WEIGHT_PA_SIG1_MASK |
						      WEIGHT_VGT_SIG0_MASK);
	reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
		WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
		WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
		WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
		WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
						      WEIGHT_VGT_SIG2_MASK |
						      WEIGHT_DC_SIG0_MASK |
						      WEIGHT_DC_SIG1_MASK |
						      WEIGHT_DC_SIG2_MASK);
	reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
		WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
		WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
		WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
		WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
						      WEIGHT_UVD_SIG0_MASK |
						      WEIGHT_UVD_SIG1_MASK |
						      WEIGHT_SPARE0_MASK |
						      WEIGHT_SPARE1_MASK);
	reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
		WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
		WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
		WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
		WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);

	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
						      WEIGHT_SQ_VSP0_MASK);
	reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
		WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
	reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
							OVR_VAL_SPARE_0_MASK |
							OVR_MODE_SPARE_1_MASK |
							OVR_VAL_SPARE_1_MASK);
	reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
		OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
		OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
		OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
	WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);

	reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
					   VSP0_MASK |
					   GPR_MASK);
	reg |= (VSP(ni_pi->cac_weights->vsp) |
		VSP0(ni_pi->cac_weights->vsp0) |
		GPR(ni_pi->cac_weights->gpr));
	WREG32(SQ_CAC_THRESHOLD, reg);

	reg = (MCDW_WR_ENABLE |
	       MCDX_WR_ENABLE |
	       MCDY_WR_ENABLE |
	       MCDZ_WR_ENABLE |
	       INDEX(0x09D4));
	WREG32(MC_CG_CONFIG, reg);

	reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
	       WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
	       ALLOW_OVERFLOW);
	WREG32(MC_CG_DATAPORT, reg);

	return 0;
}
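
/*
 * Enable or disable CAC on the SMC side via PPSMC messages. CAC is
 * deliberately not enabled when switching to a UVD state, and long
 * term averaging is dropped if the SMC rejects it.
 */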
static int ni_enable_smc_cac(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret = 0;
	PPSMC_Result smc_result;

	if (ni_pi->enable_cac) {
		if (enable) {
			if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);

				if (ni_pi->support_cac_long_term_average) {
					smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
					if (PPSMC_Result_OK != smc_result)
						ni_pi->support_cac_long_term_average = false;
				}

				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
				if (PPSMC_Result_OK != smc_result)
					ret = -EINVAL;

				ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
			}
		} else if (ni_pi->cac_enabled) {
			smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);

			ni_pi->cac_enabled = false;

			if (ni_pi->support_cac_long_term_average) {
				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
				if (PPSMC_Result_OK != smc_result)
					ni_pi->support_cac_long_term_average = false;
			}
		}
	}

	return ret;
}

static int ni_pcie_performance_request(struct radeon_device *rdev,
				       u8 perf_req, bool advertise)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

#if defined(CONFIG_ACPI)
	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
		if (eg_pi->pcie_performance_request_registered == false)
			radeon_acpi_pcie_notify_device_ready(rdev);
		eg_pi->pcie_performance_request_registered = true;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
		   eg_pi->pcie_performance_request_registered) {
		eg_pi->pcie_performance_request_registered = false;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	}
#endif
	return 0;
}

static int ni_advertise_gen2_capability(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
		pi->pcie_gen2 = true;
	else
		pi->pcie_gen2 = false;

	if (!pi->pcie_gen2)
		ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);

	return 0;
}
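
/*
 * Toggle hardware driven PCIE gen2 speed switching in the BIF; only
 * takes effect when both link partners have advertised gen2 support.
 */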
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					    bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
		if (enable) {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);
			}
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
			tmp |= LC_GEN2_EN_STRAP;

			tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			udelay(10);
			tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		} else {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp &= ~LC_GEN2_EN_STRAP;
			}
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		}
	}
}

static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
					bool enable)
{
	ni_enable_bif_dynamic_pcie_gen2(rdev, enable);

	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}
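
/*
 * Order UVD clock changes relative to the engine clock switch: set
 * vclk/dclk before the switch when sclk is going down, and after it
 * when sclk is going up.
 */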
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
					   struct radeon_ps *new_ps,
					   struct radeon_ps *old_ps)
{
	struct ni_ps *new_state = ni_get_ps(new_ps);
	struct ni_ps *current_state = ni_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))
		return;

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
		return;

	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}

void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
					  struct radeon_ps *new_ps,
					  struct radeon_ps *old_ps)
{
	struct ni_ps *new_state = ni_get_ps(new_ps);
	struct ni_ps *current_state = ni_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))
		return;

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
		return;

	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}

void ni_dpm_setup_asic(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	ni_read_clock_registers(rdev);
	btc_read_arb_registers(rdev);
	rv770_get_memory_type(rdev);
	if (eg_pi->pcie_performance_request)
		ni_advertise_gen2_capability(rdev);
	rv770_get_pcie_gen2_status(rdev);
	rv770_enable_acpi_pm(rdev);
}

void ni_update_current_ps(struct radeon_device *rdev,
			  struct radeon_ps *rps)
{
	struct ni_ps *new_ps = ni_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	eg_pi->current_rps = *rps;
	ni_pi->current_ps = *new_ps;
	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
}

void ni_update_requested_ps(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct ni_ps *new_ps = ni_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	eg_pi->requested_rps = *rps;
	ni_pi->requested_ps = *new_ps;
	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
}
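
/*
 * Full DPM bring-up: default clock gating, voltage tables, MC register
 * table, spread spectrum, firmware upload, SMC tables, CAC setup and
 * finally thermal interrupt enablement against the boot state.
 */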
int ni_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		ni_cg_clockgating_default(rdev);
	if (btc_dpm_enabled(rdev))
		return -EINVAL;
	if (pi->mg_clock_gating)
		ni_mg_clockgating_default(rdev);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_default(rdev);
	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = cypress_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("cypress_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		cypress_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);
	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	cypress_enable_display_gap(rdev);
	rv770_program_vc(rdev);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, true);
	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}
	ret = ni_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ni_process_firmware_header failed\n");
		return ret;
	}
	ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ni_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_table failed\n");
		return ret;
	}
	ret = ni_init_smc_spll_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_spll_table failed\n");
		return ret;
	}
	ret = ni_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ni_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("ni_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ni_initialize_smc_cac_tables(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = ni_initialize_hardware_cac_manager(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ni_program_response_times(rdev);
	r7xx_start_smc(rdev);
	ret = cypress_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("cypress_notify_smc_display_change failed\n");
		return ret;
	}
	cypress_enable_sclk_control(rdev, true);
	if (eg_pi->memory_transition)
		cypress_enable_mclk_control(rdev, true);
	cypress_start_dpm(rdev);
	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, true);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, true);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, true);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}

	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ni_update_current_ps(rdev, boot_ps);

	return 0;
}
void ni_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!btc_dpm_enabled(rdev))
		return;
	rv770_clear_vc(rdev);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);
	ni_enable_power_containment(rdev, boot_ps, false);
	ni_enable_smc_cac(rdev, boot_ps, false);
	cypress_enable_spread_spectrum(rdev, false);
	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, false);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, false);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, false);
	ni_stop_dpm(rdev);
	btc_reset_to_default(rdev);
	ni_stop_smc(rdev);
	ni_force_switch_to_arb_f0(rdev);

	ni_update_current_ps(rdev, boot_ps);
}

static int ni_power_control_set_level(struct radeon_device *rdev)
{
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	int ret;

	ret = ni_restrict_performance_levels_before_switch(rdev);
	if (ret)
		return ret;

	ret = rv770_halt_smc(rdev);
	if (ret)
		return ret;

	ret = ni_populate_smc_tdp_limits(rdev, new_ps);
	if (ret)
		return ret;

	ret = rv770_resume_smc(rdev);
	if (ret)
		return ret;

	ret = rv770_set_sw_state(rdev);
	if (ret)
		return ret;

	return 0;
}
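
/*
 * State switch flow: pre_set copies and adjusts the requested state,
 * set_power_state halts the SMC, uploads the new state and resumes it,
 * and post_set commits the requested state as current.
 */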
int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ni_update_requested_ps(rdev, new_ps);

	ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);

	return 0;
}

int ni_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = &eg_pi->requested_rps;
	struct radeon_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = ni_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	ret = ni_enable_power_containment(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}
	ret = ni_enable_smc_cac(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	if (eg_pi->smu_uvd_hs)
		btc_notify_uvd_to_smc(rdev, new_ps);
	ret = ni_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_upload_sw_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("ni_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ni_program_memory_timing_parameters(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	ret = ni_enable_smc_cac(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = ni_enable_power_containment(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}

	/* update tdp */
	ret = ni_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ni_power_control_set_level failed\n");
		return ret;
	}

	ret = ni_unrestrict_performance_levels_after_switch(rdev);
	if (ret) {
		DRM_ERROR("ni_unrestrict_performance_levels_after_switch failed\n");
		return ret;
	}

	return 0;
}

void ni_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = &eg_pi->requested_rps;

	ni_update_current_ps(rdev, new_ps);
}

void ni_dpm_reset_asic(struct radeon_device *rdev)
{
	ni_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);
	u16 vddc;
	struct rv7xx_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
	pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
	pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);

	/* patch up vddc if necessary */
	if (pl->vddc == 0xff01) {
		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
			pl->vddc = vddc;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
			pi->acpi_pcie_gen2 = true;
		else
			pi->acpi_pcie_gen2 = false;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		eg_pi->ulv.supported = true;
		eg_pi->ulv.pl = pl;
	}

	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
		pl->mclk = rdev->clock.default_mclk;
		pl->sclk = rdev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
	}

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}

static int ni_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j;
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct ni_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  power_info->pplib.ucNumStates, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
		power_state = (union pplib_power_state *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
			 i * power_info->pplib.ucStateEntrySize);
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
			 (power_state->v1.ucNonClockStateIndex *
			  power_info->pplib.ucNonClockSize));
		if (power_info->pplib.ucStateEntrySize - 1) {
			ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
			if (ps == NULL) {
				kfree(rdev->pm.dpm.ps);
				return -ENOMEM;
			}
			rdev->pm.dpm.ps[i].ps_priv = ps;
			ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
						      non_clock_info,
						      power_info->pplib.ucNonClockSize);
			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
				clock_info = (union pplib_clock_info *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
					 (power_state->v1.ucClockStateIndices[j] *
					  power_info->pplib.ucClockInfoSize));
				ni_parse_pplib_clock_info(rdev,
							  &rdev->pm.dpm.ps[i], j,
							  clock_info);
			}
		}
	}
	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
	return 0;
}
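
/*
 * One time DPM setup: allocate the power info structures, parse the
 * power and extended power tables, install defaults and pick the CAC
 * weight table for the detected Cayman variant (XT/PRO/LE by PCI ID).
 */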
int ni_dpm_init(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct ni_power_info *ni_pi;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	u16 data_offset, size;
	u8 frev, crev;
	struct atom_clock_dividers dividers;
	int ret;

	ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
	if (ni_pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = ni_pi;
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	rv770_get_max_vddc(rdev);

	eg_pi->ulv.supported = false;
	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;

	ret = ni_parse_power_table(rdev);
	if (ret)
		return ret;
	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		r600_free_extended_power_table(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	ni_patch_dependency_tables_based_on_leakage(rdev);

	if (rdev->pm.dpm.voltage_response_time == 0)
		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (rdev->pm.dpm.backbias_response_time == 0)
		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	pi->rlp = RV770_RLP_DFLT;
	pi->rmp = RV770_RMP_DFLT;
	pi->lhp = RV770_LHP_DFLT;
	pi->lmp = RV770_LMP_DFLT;

	eg_pi->ats[0].rlp = RV770_RLP_DFLT;
	eg_pi->ats[0].rmp = RV770_RMP_DFLT;
	eg_pi->ats[0].lhp = RV770_LHP_DFLT;
	eg_pi->ats[0].lmp = RV770_LMP_DFLT;

	eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
	eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
	eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
	eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;

	eg_pi->smu_uvd_hs = true;

	if (rdev->pdev->device == 0x6707) {
		pi->mclk_strobe_mode_threshold = 55000;
		pi->mclk_edc_enable_threshold = 55000;
		eg_pi->mclk_edc_wr_enable_threshold = 55000;
	} else {
		pi->mclk_strobe_mode_threshold = 40000;
		pi->mclk_edc_enable_threshold = 40000;
		eg_pi->mclk_edc_wr_enable_threshold = 40000;
	}
	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

	pi->voltage_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

	pi->mvdd_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);

	eg_pi->vddci_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->sclk_ss = true;
		pi->mclk_ss = true;
		pi->dynamic_ss = true;
	} else {
		pi->sclk_ss = false;
		pi->mclk_ss = false;
		pi->dynamic_ss = true;
	}

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = CYPRESS_VRC_DFLT;

	pi->power_gating = false;

	pi->gfx_clock_gating = true;

	pi->mg_clock_gating = true;
	pi->mgcgtssm = true;
	eg_pi->ls_clock_gating = false;
	eg_pi->sclk_deep_sleep = false;

	pi->dynamic_pcie_gen2 = true;

	if (pi->gfx_clock_gating &&
	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->display_gap = true;

	pi->dcodt = true;

	pi->ulps = true;

	eg_pi->dynamic_ac_timing = true;
	eg_pi->abm = true;
	eg_pi->mcls = true;
	eg_pi->light_sleep = true;
	eg_pi->memory_transition = true;
#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	eg_pi->dll_default_on = false;

	eg_pi->sclk_deep_sleep = false;

	pi->mclk_stutter_mode_threshold = 0;

	pi->sram_end = SMC_RAM_END;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
	rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
	rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;

	ni_pi->cac_data.leakage_coefficients.at = 516;
	ni_pi->cac_data.leakage_coefficients.bt = 18;
	ni_pi->cac_data.leakage_coefficients.av = 51;
	ni_pi->cac_data.leakage_coefficients.bv = 2957;

	switch (rdev->pdev->device) {
	case 0x6700:
	case 0x6701:
	case 0x6702:
	case 0x6703:
	case 0x6718:
		ni_pi->cac_weights = &cac_weights_cayman_xt;
		break;
	case 0x6705:
	case 0x6719:
	case 0x671D:
	case 0x671C:
	default:
		ni_pi->cac_weights = &cac_weights_cayman_pro;
		break;
	case 0x6704:
	case 0x6706:
	case 0x6707:
	case 0x6708:
	case 0x6709:
		ni_pi->cac_weights = &cac_weights_cayman_le;
		break;
	}

	if (ni_pi->cac_weights->enable_power_containment_by_default) {
		ni_pi->enable_power_containment = true;
		ni_pi->enable_cac = true;
		ni_pi->enable_sq_ramping = true;
	} else {
		ni_pi->enable_power_containment = false;
		ni_pi->enable_cac = false;
		ni_pi->enable_sq_ramping = false;
	}

	ni_pi->driver_calculate_cac_leakage = false;
	ni_pi->cac_configuration_required = true;

	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
		ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
	} else {
		ni_pi->support_cac_long_term_average = false;
		ni_pi->lta_window_size = 0;
		ni_pi->lts_truncate = 0;
	}

	ni_pi->use_power_boost_limit = true;

	return 0;
}

void ni_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

void ni_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct rv7xx_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		if (rdev->family >= CHIP_TAHITI)
			printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
		else
			printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
	struct ni_ps *ps = ni_get_ps(rps);
	struct rv7xx_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
		CURRENT_STATE_INDEX_SHIFT;

	if (current_index >= ps->performance_level_count) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		pl = &ps->performance_levels[current_index];
		seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
}

u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}