evergreen.c

/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int err;

	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
	}
}
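/*
 * Editorial note, not in the original source: per the PCIe spec, the
 * DEVCTL read-request field v encodes a maximum read request size of
 * 128 << v bytes, so 0..5 map to 128..4096 bytes and 6/7 are reserved.
 * The fixup above therefore forces the reserved encodings (and the
 * 128-byte setting, which this code also treats as problematic on these
 * parts) back to v = 2, i.e. a 512-byte maximum read request.
 */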
/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

/**
 * evergreen_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (evergreen+).
 * Enables the pageflip irq (vblank irq).
 */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
/**
 * evergreen_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (evergreen+).
 * Disables the pageflip irq (vblank irq).
 */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;
	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}
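/*
 * Editorial note, not in the original source: the (temp & 0x100) branch
 * above is a manual sign extension of a 9-bit two's-complement reading.
 * As a worked example, a raw value of temp = 0x1f6 has bit 8 set, so
 * actual_temp = (0x1f6 & 0x1ff) | ~0x1ff = -10, and the function returns
 * (-10 * 1000) / 2 = -5000 millidegrees.
 */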
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}
/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}
/**
 * evergreen_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (evergreen+).
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (evergreen+).
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
/**
 * evergreen_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * evergreen_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * evergreen_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enabled = 0;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
		enabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_enable_hpd(rdev, enabled);
}

/**
 * evergreen_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disabled = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		disabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disabled);
}
/* watermark setup */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
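/*
 * Editorial note, not in the original source: when both crtcs of a pair
 * are enabled with modes, the code above programs tmp = 0 for the first
 * controller and tmp = 4 for the second, so each gets half the shared
 * line buffer (3840 * 2 entries on DCE4, 4096 * 2 on DCE5). A lone
 * enabled crtc instead gets tmp = 2 or 6 and the whole buffer, which is
 * why the register comment requires the other crtc to be disabled.
 */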
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
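/*
 * Editorial note, not in the original source: the fixed20_12 helpers
 * implement 20.12 fixed-point arithmetic, so the function above computes
 * dram_channels * 4 * (yclk / 1000) * 0.7 without floating point. As a
 * worked example with assumed illustrative values, yclk = 800000 kHz and
 * dram_channels = 2 give 8 * 800 * 0.7 = 4480.
 */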
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

/**
 * evergreen_bandwidth_update - update display watermarks callback.
 *
 * @rdev: radeon_device pointer
 *
 * Update the display watermarks based on the requested mode(s)
 * (evergreen+).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
/**
 * evergreen_mc_wait_for_idle - wait for MC idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the VM request response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
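/*
 * Editorial note, not in the original source: the >> 12 shifts above are
 * because the GART start/end addresses, the page table base, and the
 * protection-fault default address are programmed in units of 4 KB pages
 * rather than bytes (2^12 = 4096).
 */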
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
	}
}
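
/**
 * evergreen_mc_resume - restore the display state saved by evergreen_mc_stop
 *
 * @rdev: radeon_device pointer
 * @save: register state saved by evergreen_mc_stop()
 *
 * Point all surface and VGA memory base addresses at the new VRAM
 * location, then restore the saved VGA and CRTC controls.
 */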
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
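
/**
 * evergreen_mc_program - program the memory controller address map
 *
 * @rdev: radeon_device pointer
 *
 * Stop the display clients, program the system aperture, FB location
 * and AGP aperture, then bring the displays back up.  The VGA render
 * path is left disabled afterwards so it cannot scribble over driver
 * objects in VRAM.
 */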
void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		       rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		       rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	/* llano/ontario only */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
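/**
 * evergreen_ring_ib_execute - schedule an indirect buffer on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Emit an INDIRECT_BUFFER packet for the IB, preceded by a write of
 * the expected read pointer so lockup detection can track progress.
 */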
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
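
/**
 * evergreen_cp_load_microcode - load the PFP and ME microcode into the CP
 *
 * @rdev: radeon_device pointer
 *
 * Stop the CP and write the PFP and ME firmware images, word by word,
 * into the CP ucode RAMs.  Returns -EINVAL if the firmware is missing.
 */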
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
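
/**
 * evergreen_cp_start - initialize the CP and emit initial state
 *
 * @rdev: radeon_device pointer
 *
 * Run ME_INITIALIZE, un-halt the CP, then emit the clear-state
 * preamble plus a small amount of default context state on the
 * gfx ring.
 */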
static int evergreen_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(ring, evergreen_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
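
/**
 * evergreen_cp_resume - reset and restart the CP
 *
 * @rdev: radeon_device pointer
 *
 * Soft-reset the CP and its dependent blocks, program the ring buffer
 * and rptr writeback addresses, then start the CP and run a ring test.
 */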
int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
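/**
 * evergreen_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Program the per-family golden configuration: tiling setup, render
 * backend mapping and the SQ/SX/SC resource management defaults.
 */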
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	u32 disabled_rb_mask;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			rdev->config.evergreen.tile_config |= 1 << 4;
		else
			rdev->config.evergreen.tile_config |= 0 << 4;
	}
	rdev->config.evergreen.tile_config |= 0 << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;

	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;

	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		tmp = (((efuse_straps_4 & 0xf) << 4) |
		       ((efuse_straps_3 & 0xf0000000) >> 28));
	} else {
		tmp = 0;
		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
			u32 rb_disable_bitmap;

			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
			tmp <<= 4;
			tmp |= rb_disable_bitmap;
		}
	}
	/* the enabled rbs are just the ones that are not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	tmp = gb_addr_config & NUM_PIPES_MASK;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
					EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
	WREG32(GB_BACKEND_MAP, tmp);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family <= CHIP_SUMO2)
		WREG32(SMX_SAR_CTL0, 0x00010000);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
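
/**
 * evergreen_mc_init - gather VRAM parameters and place the GPU apertures
 *
 * @rdev: radeon_device pointer
 *
 * Work out the VRAM bus width and size (register units differ between
 * fusion and discrete parts), then lay out the VRAM and GTT domains.
 */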
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen/cayman/tn */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
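
/**
 * evergreen_gpu_is_lockup - check whether the gfx engine appears hung
 *
 * @rdev: radeon_device pointer
 * @ring: ring to check
 *
 * If the GUI is idle the ring cannot be locked up; otherwise force
 * some CP activity and let the generic lockup test decide.
 */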
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
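
/**
 * evergreen_gpu_soft_reset - soft-reset the gfx blocks
 *
 * @rdev: radeon_device pointer
 *
 * Dump the GRBM/SRBM status, halt the CP, pulse GRBM_SOFT_RESET for
 * all gfx blocks and restore the MC state afterwards.
 */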
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}

/* Interrupts */
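
/**
 * evergreen_get_vblank_counter - fetch the frame count for a crtc
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc index
 *
 * Returns the hardware frame counter of the given crtc, or 0 for an
 * invalid index.
 */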
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}
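
/**
 * evergreen_disable_interrupt_state - mask all interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Clear the CP, GRBM, display, DAC and hotplug interrupt enables,
 * preserving only the programmed hotplug polarity bits.
 */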
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* only one DAC on DCE6 */
	if (!ASIC_IS_DCE6(rdev))
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
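
/**
 * evergreen_irq_set - program the interrupt enables from the driver state
 *
 * @rdev: radeon_device pointer
 *
 * Translate the irq bookkeeping in rdev->irq (ring, vblank, hotplug and
 * HDMI sources) into the corresponding interrupt control registers.
 */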
  2210. int evergreen_irq_set(struct radeon_device *rdev)
  2211. {
  2212. u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
  2213. u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
  2214. u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
  2215. u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
  2216. u32 grbm_int_cntl = 0;
  2217. u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
  2218. u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
  2219. if (!rdev->irq.installed) {
  2220. WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
  2221. return -EINVAL;
  2222. }
  2223. /* don't enable anything if the ih is disabled */
  2224. if (!rdev->ih.enabled) {
  2225. r600_disable_interrupts(rdev);
  2226. /* force the active interrupt state to all disabled */
  2227. evergreen_disable_interrupt_state(rdev);
  2228. return 0;
  2229. }
  2230. hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2231. hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2232. hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2233. hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2234. hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2235. hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
  2236. afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2237. afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2238. afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2239. afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2240. afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2241. afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
  2242. if (rdev->family >= CHIP_CAYMAN) {
  2243. /* enable CP interrupts on all rings */
  2244. if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
  2245. DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
  2246. cp_int_cntl |= TIME_STAMP_INT_ENABLE;
  2247. }
  2248. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
  2249. DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
  2250. cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
  2251. }
  2252. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
  2253. DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
  2254. cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
  2255. }
  2256. } else {
  2257. if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
  2258. DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
  2259. cp_int_cntl |= RB_INT_ENABLE;
  2260. cp_int_cntl |= TIME_STAMP_INT_ENABLE;
  2261. }
  2262. }
  2263. if (rdev->irq.crtc_vblank_int[0] ||
  2264. atomic_read(&rdev->irq.pflip[0])) {
  2265. DRM_DEBUG("evergreen_irq_set: vblank 0\n");
  2266. crtc1 |= VBLANK_INT_MASK;
  2267. }
  2268. if (rdev->irq.crtc_vblank_int[1] ||
  2269. atomic_read(&rdev->irq.pflip[1])) {
  2270. DRM_DEBUG("evergreen_irq_set: vblank 1\n");
  2271. crtc2 |= VBLANK_INT_MASK;
  2272. }
  2273. if (rdev->irq.crtc_vblank_int[2] ||
  2274. atomic_read(&rdev->irq.pflip[2])) {
  2275. DRM_DEBUG("evergreen_irq_set: vblank 2\n");
  2276. crtc3 |= VBLANK_INT_MASK;
  2277. }
  2278. if (rdev->irq.crtc_vblank_int[3] ||
  2279. atomic_read(&rdev->irq.pflip[3])) {
  2280. DRM_DEBUG("evergreen_irq_set: vblank 3\n");
  2281. crtc4 |= VBLANK_INT_MASK;
  2282. }
  2283. if (rdev->irq.crtc_vblank_int[4] ||
  2284. atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[2]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[3]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[4]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[5]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);

	return 0;
}
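
/**
 * evergreen_irq_ack - ack the display interrupt sources
 * @rdev: radeon_device pointer
 *
 * Snapshots the display interrupt status registers into
 * rdev->irq.stat_regs, then writes back the acknowledge bits for any
 * pending page flip, vblank, vline, hotplug or HDMI (AFMT) events so
 * those sources can raise interrupts again.
 */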
static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}
	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}
	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
	}
}
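
/**
 * evergreen_irq_disable - disable interrupt generation
 * @rdev: radeon_device pointer
 *
 * Disables the interrupt controller, waits briefly for in-flight
 * interrupts, acks anything still pending and masks all the display
 * interrupt sources.
 */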
void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}
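
/**
 * evergreen_irq_suspend - disable interrupts for suspend
 * @rdev: radeon_device pointer
 *
 * Disables interrupt generation and stops the RLC so the GPU is
 * quiescent before the device is suspended.
 */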
void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
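
/**
 * evergreen_get_ih_wptr - fetch the current IH ring write pointer
 * @rdev: radeon_device pointer
 *
 * Reads the write pointer from the writeback buffer when enabled,
 * otherwise from the IH_RB_WPTR register, and handles ring buffer
 * overflow by skipping ahead to the oldest vector that has not been
 * overwritten.
 */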
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
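
/**
 * evergreen_irq_process - dispatch pending interrupt vectors
 * @rdev: radeon_device pointer
 *
 * Walks the IH ring from rptr to wptr, decoding each 16-byte vector
 * into a source id and source data and routing it to the matching
 * handler: vblank/vline and page flips per CRTC, hotplug, HDMI audio,
 * CP fences and GUI idle. Re-checks wptr afterwards in case more
 * vectors arrived while processing.
 */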
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[2]))
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[3]))
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[4]))
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[5]))
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 44: /* hdmi */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI2\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI3\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI4\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI5\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
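
/**
 * evergreen_startup - bring the GPU engines up
 * @rdev: radeon_device pointer
 *
 * Enables the PCIe gen2 link, loads the required microcode, programs
 * the memory controller and GART (or AGP), initializes the GPU and
 * blitter, and brings up writeback, fences, interrupts, the CP ring,
 * the IB pool and audio. Shared by the init and resume paths.
 */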
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}
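
/**
 * evergreen_resume - resume from suspend
 * @rdev: radeon_device pointer
 *
 * Resets the ASIC, re-posts the card via the ATOM BIOS tables and
 * restarts the engines with evergreen_startup(). Returns 0 on
 * success, error on failure.
 */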
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; unlike r500 hw, on rv770 hw
	 * posting performs the tasks necessary to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
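
/**
 * evergreen_suspend - suspend the GPU
 * @rdev: radeon_device pointer
 *
 * Tears down audio, stops the CP, disables interrupts and writeback,
 * and shuts down the GART in preparation for suspend.
 */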
int evergreen_suspend(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	r600_audio_fini(rdev);
	r700_cp_stop(rdev);
	ring->ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}
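
/**
 * evergreen_fini - tear down the driver state
 * @rdev: radeon_device pointer
 *
 * Unwinds everything set up by evergreen_init() in reverse order:
 * audio, blitter, CP, interrupts, writeback, IB pool, GART, scratch
 * memory, GEM, fences, AGP, the memory manager, ATOM state and the
 * BIOS copy.
 */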
void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
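
/**
 * evergreen_pcie_gen2_enable - enable PCIe gen2 link speed
 * @rdev: radeon_device pointer
 *
 * Switches the PCIe link to gen2 speeds when the module parameter
 * allows it, the board is a PCIe (non-IGP, non-X2) part, and the
 * other side of the link supports 5.0 GT/s.
 */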
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl, mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}