kv_dpm.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include "radeon_asic.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
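/* Accessors for the driver-private state hung off the generic radeon
 * power structures: kv_ps lives in radeon_ps.ps_priv and kv_power_info
 * in rdev->pm.dpm.priv.
 */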
static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
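/* Walk a sentinel-terminated table of power-throttle register entries and
 * program them.  CACHE-typed entries are accumulated and OR-ed into the
 * next real write; each offset is interpreted per entry as SMC-indirect,
 * DIDT-indirect, or plain MMIO (dword offset, hence the << 2).
 */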
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
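/* Toggle the DIDT (di/dt current throttling) enable bit for each block
 * (SQ/DB/TD/TCP) this board claims ramping support for; programming is
 * done under RLC safe mode by the caller below.
 */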
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif
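/* Toggle CAC (capacitance * activity) power estimation in the SMU.  The
 * result is tracked in pi->cac_enabled so the disable message is only
 * sent if a prior enable actually succeeded.
 */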
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}
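/* Read the DPM table and soft-register offsets out of the SMU firmware
 * header so later uploads know where to copy data in SMC SRAM.
 */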
static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}
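/* Program/clear the CG_FTV_0 throttle mask.  The magic value comes from
 * AMD's reference code; "vc" presumably refers to the voltage controller,
 * but the field semantics are not documented here.
 */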
static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC000);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
						 &pi->sys_info.vid_mapping_table,
						 vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].clk == pi->boot_pl.sclk) ||
			    (i == 0))
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
			    (i == 0))
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}
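/* Pick a DFS bypass divider code when the requested clock sits within
 * 200 units of a known reference frequency.  Clocks in this driver appear
 * to be in 10 kHz units, so 40000 corresponds to 400 MHz.
 */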
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
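/* Build the SMU UVD level table from the vclk/dclk voltage dependency
 * table and upload the level count, sampling interval, and levels.
 * Entries above the high-voltage threshold (when set) are skipped; the
 * VCE/SAMU/ACP populate functions below follow the same pattern.
 */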
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
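/* Keep driver-local copies of the current and requested power states,
 * re-pointing ps_priv at the embedded kv_ps copy so the originals can
 * be freed or reused safely.
 */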
static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
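/* DPM bring-up sequence: locate the SMU tables via the firmware header,
 * build and upload the graphics/UVD/VCE/SAMU/ACP level tables, start the
 * automatic state machine and DPM, then layer on thermal throttling,
 * ULV, DIDT, CAC, and the thermal interrupt before power-gating the
 * idle multimedia blocks.
 */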
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), true);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

#if 0
static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
#endif

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
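/* On ungate, select and upload the UVD boot level, then force the
 * enabled-level mask when UVD DPM is unavailable or stable p-state is
 * requested; finally toggle UVD DPM in the SMU.  The SAMU and ACP
 * update functions below follow the same shape.
 */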
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (!pi->caps_uvd_dpm ||
		    pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_UVDDPM_SetEnabledMask,
							  (1 << pi->uvd_boot_level));
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

#if 0
static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= 0) /* XXX */
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
	}

	return 0;
}
#endif

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
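/* UVD power gating: stop the UVD block and disable its clock gating
 * before asking the SMU to power it off; on the way back up, power on
 * first, then resume and restart UVD and re-enable clock gating.
 */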
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

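/*
 * Clamp the lowest/highest valid graphics DPM levels to the sclk range
 * requested by the new power state, using the vddc/sclk dependency table
 * when the BIOS provides one and the sumo-style sclk/voltage mapping
 * table otherwise.  If the two bounds cross, the level closer to the
 * requested range wins.
 */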
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) ||
			    (i == 0)) {
				pi->highest_valid = i;
				break;
			}
		}

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk ||
			    i == 0) {
				pi->highest_valid = i;
				break;
			}
		}

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

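/*
 * Write the ClkBypassCntl field of the boot graphics level into SMC RAM
 * so the DFS bypass setting matches what the new state needs.
 */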
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
		if (ret == 0)
			pi->nb_dpm_enabled = true;
	}

	return ret;
}

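/*
 * Force the SMU to the highest or lowest enabled graphics level, or
 * return level selection to automatic DPM.
 */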
int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

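/*
 * Apply the requested power state.  Clock gating is dropped around the
 * reprogramming sequence; Kabini forces the lowest level while the new
 * levels are uploaded, other parts freeze sclk DPM instead.  The VCE
 * DPM update is compiled out (#if 0) in this version of the code.
 */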
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	/*struct radeon_ps *old_ps = &pi->current_rps;*/
	int ret;

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), false);

	if (rdev->family == CHIP_KABINI) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_sclk_t(rdev);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev);
		}
	}

	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_HDP), true);

	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	kv_force_lowest_valid(rdev);
	kv_init_graphics_levels(rdev);
	kv_program_bootup_state(rdev);
	kv_upload_dpm_settings(rdev);
	kv_force_lowest_valid(rdev);
	kv_unforce_levels(rdev);
}

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;

	if (table->count) {
		for (i = 0; i < table->count; i++)
			table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

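/*
 * Force the highest (or, in the next function, the lowest) enabled
 * graphics DPM level.  The descending scan stops at level 0 so the
 * unsigned index cannot wrap when no level is enabled.
 */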
static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
}

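/*
 * Pick the largest deep-sleep divider that still keeps the divided sclk
 * at or above the minimum engine clock allowed in self refresh.
 */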
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

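/*
 * Adjust the requested state's levels to the platform limits: enforce a
 * minimum sclk, clamp levels above the high-voltage threshold, pin all
 * levels to ~75% of max sclk for stable p-state, and pick the NB p-state
 * indices (battery operation can force the higher NB p-states).
 */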
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x2;
		ps->dpmx_nb_ps_hi = 0x1;

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}

	return 0;
}

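/*
 * Derive the per-level GnbSlow/ForceNbPs1/UpH settings.  High memory
 * clocks, three or more active crtcs, or active video playback force
 * the NB out of its slow mode.
 */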
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}

	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

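/*
 * Build the graphics level table from the BIOS dependency table (or the
 * sumo sclk/voltage mapping table), stopping at the high-voltage
 * threshold, then start with every level disabled.
 */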
static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = sumo_convert_vid7_to_vid2(rdev,
							     &pi->sys_info.vid_mapping_table,
							     table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

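/*
 * Program the thermal interrupt thresholds.  The +49 offset applied to
 * the degree values presumably matches the SMC's internal temperature
 * encoding; it is carried over as-is from the register programming
 * sequence.
 */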
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

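/*
 * Pull the integrated-system info (rev 8 only) out of the VBIOS: bootup
 * clocks and voltage, thermal limits, NB p-state clocks, DFS-bypass
 * capability, and the sclk/voltage mapping tables.
 */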
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

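/*
 * Walk the PPLib state array in the VBIOS and build the radeon_ps array,
 * parsing up to SUMO_MAX_HARDWARE_POWERLEVELS clock-info entries per
 * state plus the shared non-clock info.  Error paths free the state
 * array before returning.
 */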
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			kfree(rdev->pm.dpm.ps);
			return -EINVAL;
		}
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
	return 0;
}

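/*
 * One-time DPM setup: allocate the private power info, parse the
 * extended power, system-info and power-state tables, and set the
 * default feature caps for this asic.
 */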
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	if (rdev->family == CHIP_KABINI)
		pi->high_voltage_t = 4001;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false;
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}

void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{
}

u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}