/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include "radeon_asic.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
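
/*
 * Walk a table of power-containment register settings.  KV_CONFIGREG_CACHE
 * entries are accumulated into a cache and OR'd into the next real write;
 * all other entries are read-modify-write cycles against the SMC, DIDT or
 * plain MMIO register space, selected by the entry type.
 */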
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
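
/*
 * Toggle the DIDT enable bit for each block (SQ, DB, TD, TCP) that the
 * platform caps flags allow.
 */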
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}
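
/*
 * DIDT registers are programmed with the RLC held in safe mode; the config
 * table is only (re)programmed on the enable path.
 */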
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}
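
/*
 * Pull the DPM table and soft-register offsets out of the SMU firmware
 * header in SMC SRAM; everything uploaded later is placed relative to
 * these offsets.
 */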
static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC000);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}
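
/*
 * Ask the ATOM BIOS for engine PLL dividers for the requested sclk and
 * store the post divider plus the (big-endian) frequency in the SMU
 * graphics level entry.
 */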
static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
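
/*
 * VID helpers: an 8-bit VID code maps linearly to a voltage (larger codes
 * mean lower voltage); 2-bit VIDs are first expanded through the sumo
 * VID mapping table.
 */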
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
						 &pi->sys_info.vid_mapping_table,
						 vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}
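
/*
 * Find the graphics DPM level whose clock matches the boot-up sclk and
 * mark it as the boot level; fall back to the sumo sclk/voltage mapping
 * table when no vddc-vs-sclk dependency table is available.
 */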
static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].clk == pi->boot_pl.sclk) ||
			    (i == 0))
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
			    (i == 0))
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}
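
/*
 * Pick a DFS bypass divider code when the requested clock sits close to
 * one of the fixed bypass frequencies (clocks here are in 10 kHz units,
 * so 40000 corresponds to 400 MHz).
 */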
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
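
/*
 * Build the UVD vclk/dclk level table from the clock-voltage dependency
 * table (skipping entries above the high-voltage threshold) and upload
 * it to the SMU in big-endian format.  The VCE, SAMU and ACP tables
 * below follow the same pattern.
 */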
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
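
/*
 * Apply the same DFS bypass selection as kv_get_clk_bypass() to each
 * graphics DPM level, using whichever sclk table is available.
 */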
static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
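
/*
 * Bring DPM up: parse the firmware header, upload the graphics/UVD/VCE/
 * SAMU/ACP level tables, start automatic management, then enable thermal
 * throttling, voltage scaling, ULV, DIDT and CAC before power-gating the
 * blocks that are not in use yet.
 */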
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), true);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

#if 0
static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
#endif

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
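
/*
 * Select the UVD boot level and enable or disable UVD DPM around a
 * power-gating transition; with stable p-state (or no UVD DPM) the
 * enabled-level mask is pinned to the boot level.
 */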
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (!pi->caps_uvd_dpm ||
		    pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_UVDDPM_SetEnabledMask,
							  (1 << pi->uvd_boot_level));
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

#if 0
static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= 0) /* XXX */
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
	}

	return 0;
}
#endif

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
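
/*
 * UVD power gating: stop the block and drop its clock gating before asking
 * the SMU to power it off, and reverse the sequence (power on, resume,
 * restart) when un-gating.
 */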
  1251. void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
  1252. {
  1253. struct kv_power_info *pi = kv_get_pi(rdev);
  1254. if (pi->uvd_power_gated == gate)
  1255. return;
  1256. pi->uvd_power_gated = gate;
  1257. if (gate) {
  1258. uvd_v1_0_stop(rdev);
  1259. cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
  1260. kv_update_uvd_dpm(rdev, gate);
  1261. if (pi->caps_uvd_pg)
  1262. kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
  1263. } else {
  1264. if (pi->caps_uvd_pg)
  1265. kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
  1266. uvd_v4_2_resume(rdev);
  1267. uvd_v1_0_start(rdev);
  1268. cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
  1269. kv_update_uvd_dpm(rdev, gate);
  1270. }
  1271. }
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}
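
/*
 * Derive the lowest and highest valid graphics DPM levels for the new power
 * state by walking the sclk/voltage dependency table (or the sclk voltage
 * mapping table when no dependency table is present).
 */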
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) ||
			    (i == 0)) {
				pi->highest_valid = i;
				break;
			}
		}

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk ||
			    i == 0) {
				pi->highest_valid = i;
				break;
			}
		}

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
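
/*
 * If DFS bypass is supported, write the clock bypass control byte for the
 * boot graphics level into the SMU DPM table for the new power state.
 */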
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
		if (ret == 0)
			pi->nb_dpm_enabled = true;
	}

	return ret;
}
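
/* Force the highest or lowest enabled DPM level, or return to automatic selection. */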
int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
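
/*
 * Program the requested power state. Kabini uses a force-lowest/unforce
 * sequence while other parts freeze SCLK DPM around the table upload; in
 * both cases clock gating is temporarily disabled while the DPM tables and
 * NB p-state settings are updated.
 */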
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	/*struct radeon_ps *old_ps = &pi->current_rps;*/
	int ret;

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	if (rdev->family == CHIP_KABINI) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_sclk_t(rdev);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev);
		}
	}

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), true);

	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	kv_force_lowest_valid(rdev);
	kv_init_graphics_levels(rdev);
	kv_program_bootup_state(rdev);
	kv_upload_dpm_settings(rdev);
	kv_force_lowest_valid(rdev);
	kv_unforce_levels(rdev);
}

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
				pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;

	if (table->count) {
		for (i = 0; i < table->count; i++)
			table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	/* i is unsigned, so "i >= 0" would never terminate; stop at level 0 */
	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
}
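
/*
 * Pick the largest deep-sleep divider that keeps the divided sclk at or
 * above the minimum engine clock allowed in self-refresh.
 */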
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	/* walk down from the largest divider id; "i <= 0" would skip the loop entirely */
	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if ((temp >= min) || (i == 0))
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}
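
/*
 * Clamp and adjust the requested state's clock levels: enforce a minimum
 * sclk (or the 75%-of-max stable p-state sclk), cap levels that exceed the
 * high-voltage threshold, and pick NB p-state limits based on battery state,
 * display count, and video playback.
 */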
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		/* search downward from the top of the table; "i++" would run past the end */
		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x2;
		ps->dpmx_nb_ps_hi = 0x1;

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}

	return 0;
}

static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}

	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}
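
/*
 * Build the graphics DPM level table from the voltage dependency table (or
 * the sclk voltage mapping table), then mark all levels disabled until the
 * valid range is enabled later.
 */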
static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = sumo_convert_vid7_to_vid2(rdev,
							     &pi->sys_info.vid_mapping_table,
							     table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}
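
/* Program the thermal interrupt thresholds and record the DPM thermal range. */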
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
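
/*
 * Parse the rev 8 IntegratedSystemInfo table from the VBIOS: bootup clocks,
 * thermal limits, NB p-state clocks, and the sclk/voltage mapping tables.
 */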
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
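
/*
 * Parse the PowerPlay state array from the VBIOS into radeon_ps/kv_ps
 * structures, filling in per-level clock info and non-clock info.
 */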
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
	for (i = 0; i < state_array->ucNumEntries; i++) {
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = power_state->v2.clockInfoIndex[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
	return 0;
}
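
/*
 * Allocate the KV power info structure and set the default DPM capability
 * flags, then parse the system info and power tables from the VBIOS.
 */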
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	if (rdev->family == CHIP_KABINI)
		pi->high_voltage_t = 4001;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false;
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}

void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{
}

u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}