r600.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
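
/* Illustrative note (not from the original source): ASIC_T above is read as
 * a 9-bit two's-complement value.  A raw reading of 0x19 decodes to
 * 25 * 1000 = 25000 millidegrees (25 C); a raw reading of 0x1f6 has bit 8
 * set, so 0xf6 - 256 = -10, i.e. -10000 millidegrees.
 */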

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dual head, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
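
/* Illustrative note (not from the original source): each PM_PROFILE_*_IDX
 * entry filled in by the profile-init functions below records a power-state
 * index (ps_idx) and a clock-mode index (cm_idx) for both the dpms-off and
 * dpms-on cases, e.g.
 *
 *	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
 *
 * selects power_state[1] whenever the low single-head profile is active
 * and a display is on.
 */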

void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
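
/* Illustrative note (not from the original source): r600_pm_misc() below
 * applies the software-controlled core voltage for the requested state.
 * 0xff01 appears to act as a sentinel rather than a real voltage (the code
 * only skips it); real values are handed to radeon_atom_set_voltage() only
 * when they differ from the cached rdev->pm.current_vddc.
 */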

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
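
/* Illustrative note (not from the original source): r600_hpd_set_polarity()
 * below flips the interrupt polarity bit according to the current sense
 * state, so the next HPD interrupt fires on the opposite transition: when
 * the connector reads as connected the polarity is set to catch the unplug,
 * and vice versa for the next plug-in.
 */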

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
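
/* Illustrative note (not from the original source): in r600_hpd_init()
 * below, DC_HPDx_CONNECTION_TIMER(0x9c4) and DC_HPDx_RX_INT_TIMER(0xfa)
 * program the debounce/timer fields of the HPD control register on DCE3
 * parts, while eDP and LVDS connectors are skipped entirely per the bug
 * referenced in the comment inside the loop.
 */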

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid
			 * breaking the aux dp channel on iMac and to help
			 * (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
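
/* Illustrative note (not from the original source): the poll loop above
 * reads back VM_CONTEXT0_REQUEST_RESPONSE until RESPONSE_TYPE becomes
 * nonzero; a value of 2 is treated as a failed TLB flush and any other
 * nonzero value as completion, with a 1 us delay between polls bounded by
 * rdev->usec_timeout.
 */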

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
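
/* Illustrative note (not from the original source): each GART page table
 * entry is 8 bytes (table_size = num_gpu_pages * 8 above), so, for example,
 * a 512 MB GTT of 4 KB pages needs (512 MB / 4 KB) * 8 = 1 MB of table in
 * VRAM.
 */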

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
  968. int r600_mc_wait_for_idle(struct radeon_device *rdev)
  969. {
  970. unsigned i;
  971. u32 tmp;
  972. for (i = 0; i < rdev->usec_timeout; i++) {
  973. /* read MC_STATUS */
  974. tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
  975. if (!tmp)
  976. return 0;
  977. udelay(1);
  978. }
  979. return -1;
  980. }
  981. static void r600_mc_program(struct radeon_device *rdev)
  982. {
  983. struct rv515_mc_save save;
  984. u32 tmp;
  985. int i, j;
  986. /* Initialize HDP */
  987. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  988. WREG32((0x2c14 + j), 0x00000000);
  989. WREG32((0x2c18 + j), 0x00000000);
  990. WREG32((0x2c1c + j), 0x00000000);
  991. WREG32((0x2c20 + j), 0x00000000);
  992. WREG32((0x2c24 + j), 0x00000000);
  993. }
  994. WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
  995. rv515_mc_stop(rdev, &save);
  996. if (r600_mc_wait_for_idle(rdev)) {
  997. dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
  998. }
  999. /* Lockout access through VGA aperture (doesn't exist before R600) */
  1000. WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
  1001. /* Update configuration */
  1002. if (rdev->flags & RADEON_IS_AGP) {
  1003. if (rdev->mc.vram_start < rdev->mc.gtt_start) {
  1004. /* VRAM before AGP */
  1005. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
  1006. rdev->mc.vram_start >> 12);
  1007. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  1008. rdev->mc.gtt_end >> 12);
  1009. } else {
  1010. /* VRAM after AGP */
  1011. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
  1012. rdev->mc.gtt_start >> 12);
  1013. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  1014. rdev->mc.vram_end >> 12);
  1015. }
  1016. } else {
  1017. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
  1018. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
  1019. }
  1020. WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
  1021. tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
  1022. tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
  1023. WREG32(MC_VM_FB_LOCATION, tmp);
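	/*
	 * Illustrative example (not driver code): MC_VM_FB_LOCATION packs
	 * the top 16 bits of the end address into the high half and the top
	 * 16 bits of the start address into the low half, i.e. it works in
	 * 16 MiB units. With a hypothetical 256 MiB VRAM at GPU address 0:
	 *
	 *	vram_start = 0x00000000  ->  0x00000000 >> 24 = 0x0000
	 *	vram_end   = 0x0FFFFFFF  ->  0x0FFFFFFF >> 24 = 0x000F
	 *	tmp        = (0x000F << 16) | 0x0000 = 0x000F0000
	 */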
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same offset as in the CPU (PCI) address space,
 * as some GPUs seem to have issues when we reprogram it to a different
 * address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside of them (weird GPU policy,
 * see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;

		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
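
/*
 * Illustrative example (not driver code), assuming a hypothetical AGP
 * aperture at 0xD0000000-0xDFFFFFFF and 512 MiB of VRAM:
 *
 *	size_bf = 0xD0000000                           (3.25 GiB before it)
 *	size_af = 0xFFFFFFFF - 0xDFFFFFFF = 0x20000000 (512 MiB after it)
 *
 * size_bf > size_af, so VRAM is placed immediately below the aperture:
 * vram_start = 0xD0000000 - 0x20000000 = 0xB0000000, keeping VRAM and
 * AGP contiguous in the GPU address space.
 */
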
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
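	/*
	 * Illustrative example (not driver code): with a hypothetical board
	 * reporting 64-bit channels (chansize = 64) and a CHMAP channel
	 * count field of 2 (numchan = 4), the memory bus width works out
	 * to:
	 *
	 *	vram_width = 4 * 64 = 256 bits
	 */
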
	/* Could aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);
	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   (void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, and it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check for that.
 */
static int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU soft reset\n");
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	rv515_mc_resume(rdev, &save);
	return 0;
}

bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (r6xx-evergreen).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 dma_status_reg;

	dma_status_reg = RREG32(DMA_STATUS_REG);
	if (dma_status_reg & DMA_IDLE) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
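
/*
 * Illustrative walk-through (not driver code), assuming a hypothetical
 * r6xx part with tiling_pipe_num = 2 (i.e. 1 << 2 = 4 rendering pipes),
 * max_rb_num = 4, total_max_rb_num = 8 and no backends disabled:
 *
 *	disabled_rb_mask |= 0xf0;	/- RBs 4-7 don't exist -/
 *	req_rb_num    = 8 - 4 = 4;
 *	pipe_rb_ratio = 4 / 4 = 1;	pipe_rb_remain = 0;
 *
 * The loop then packs one 2-bit backend index per pipe, highest RB first:
 * data = (3 << 6) | (2 << 4) | (1 << 2) | 0 = 0xE4.
 */
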
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}

static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
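	/*
	 * Illustrative note (not driver code): PIPE_TILING stores log2 of
	 * the pipe count, so e.g. 8 tile pipes encode as PIPE_TILING(3)
	 * because 1 << 3 == 8. The same field is decoded again below when
	 * it is handed to r6xx_remap_render_backend().
	 */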
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R6XX_MAX_BACKENDS -
		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
	if (tmp < rdev->config.r600.max_backends) {
		rdev->config.r600.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = R6XX_MAX_PIPES -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.r600.max_pipes) {
		rdev->config.r600.max_pipes = tmp;
	}
	tmp = R6XX_MAX_SIMDS -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.r600.max_simds) {
		rdev->config.r600.max_simds = tmp;
	}

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values.
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}

/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
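
/*
 * Illustrative sketch (not driver code): the PCIE port registers sit
 * behind an index/data pair, so each access writes the register offset
 * to PCIE_PORT_INDEX and then moves data through PCIE_PORT_DATA; the
 * dummy reads flush the posted writes. A hypothetical read-modify-write
 * of port register 0x10 (offset and "some_bit" are made up here) would
 * look like:
 *
 *	u32 v = r600_pciep_rreg(rdev, 0x10);
 *	r600_pciep_wreg(rdev, 0x10, v | some_bit);
 */
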
/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}

int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset CP */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset CP */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}

void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}
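
/*
 * Illustrative example (not driver code): the alignment above rounds the
 * requested size up to the next power of two, expressed via drm_order()
 * (ceil(log2)) on the size in 8-byte units. For a hypothetical 1 MiB
 * request:
 *
 *	rb_bufsz  = drm_order(0x100000 / 8) = drm_order(0x20000) = 17
 *	ring_size = (1 << (17 + 1)) * 4 = 0x100000 (already a power of two)
 *
 * A 1.5 MiB request would round up to 2 MiB by the same formula.
 */
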
void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 */
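
/*
 * Illustrative sketch (not driver code): a minimal DMA write packet, as
 * emitted by r600_dma_ring_test() below. DMA_PACKET() builds the header
 * from the opcode and a dword count (1 for a single-dword WRITE); it is
 * followed by the 40-bit destination address split across two dwords and
 * the payload itself. "dst_addr" here stands for any GPU address:
 *
 *	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
 *	radeon_ring_write(ring, dst_addr & 0xfffffffc);
 *	radeon_ring_write(ring, upper_32_bits(dst_addr) & 0xff);
 *	radeon_ring_write(ring, 0xDEADBEEF);
 */
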
/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl;
	u32 rb_bufsz;
	int r;

	/* Reset dma */
	if (rdev->family >= CHIP_RV770)
		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
	else
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = drm_order(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/*
 * CP fences/semaphores
 */
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, addr & 0xffffffff);
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}

void r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
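
/*
 * Illustrative sketch (not driver code): semaphores pair a signal on one
 * ring with a wait on another. A hypothetical caller synchronizing the
 * DMA ring against the GFX ring (gfx_ring, dma_ring and sem are made-up
 * names here) might emit:
 *
 *	r600_semaphore_ring_emit(rdev, gfx_ring, sem, false);     /- signal -/
 *	r600_dma_semaphore_ring_emit(rdev, dma_ring, sem, true);  /- wait -/
 *
 * so the DMA ring stalls until the GFX ring reaches the signal point.
 */
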
/*
 * DMA fences/semaphores
 */

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	struct radeon_sa_bo *vb = NULL;
	int r;

	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
	if (r) {
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
	r600_blit_done_copy(rdev, fence, vb, sem);
	return 0;
}

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r6xx-r7xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset, uint64_t dst_offset,
		  unsigned num_gpu_pages,
		  struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFF)
			cur_size_in_dw = 0xFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	r = r600_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_cp_stop(rdev);
	r600_dma_stop(rdev);
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	return 0;
}

/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more than
 * call ASIC-specific functions. This should also allow us to remove
 * a bunch of callback functions like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		/* 3 DW for the SET_CONFIG_REG write + 4 DW for the IB packet */
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		/* 5 DW for the MEM_WRITE + 4 DW for the IB packet */
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}

free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		/* Account for the NOP padding inserted below: skip past the
		 * 4 DW write packet to the (wptr & 7) == 5 slot where the
		 * 3 DW IB packet will start, then past the IB packet itself.
		 */
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring. Pad as necessary with NOPs: e.g., at (wptr & 7) == 2,
	 * three NOPs bring wptr to 5, and the 3 DW IB packet then ends
	 * exactly on the next 8 DW boundary.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 * the same as the CP ring buffer, but in reverse. Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes. As the host irq handler processes interrupts,
 * it increments the rptr. When the rptr catches up with the wptr, all
 * the current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
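	/* e.g. a 64 KiB request (an illustrative note): drm_order(16384) is
	 * 14, and (1 << 14) * 4 = 64 KiB, so power-of-two sizes round-trip
	 * unchanged while other sizes round up to a power of two.
	 */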
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	if (rdev->family == CHIP_ARUBA) {
		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	}
	if (rdev->family <= CHIP_CAYMAN) {
		WREG32(RLC_HB_BASE, 0);
		WREG32(RLC_HB_RPTR, 0);
		WREG32(RLC_HB_WPTR, 0);
	}
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_ARUBA) {
		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 d1grph = 0, d2grph = 0;
	u32 dma_cntl;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}

	return 0;
}

static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last non-overwritten vector (wptr + 16). Hopefully
		 * this will allow us to catch up.
		 */
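		/* e.g. with a 64 KiB ring (ptr_mask = 0xffff) and wptr = 0x20
		 * at overflow time (an illustrative note), processing resumes
		 * at (0x20 + 16) & 0xffff = 0x30, skipping the vector that may
		 * be mid-overwrite.
		 */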
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
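
/*
 * Worked example (illustrative only): a D1 vblank event arrives as a
 * 16-byte vector whose first two little-endian DWs are 0x00000001 and
 * 0x00000000, i.e. src_id = 1 and src_data = 0 per the table above;
 * r600_irq_process() below masks these fields out of the ring words.
 */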

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this, we directly perform the HDP flush by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);
}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}