r600.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
        bool connected = false;

        if (ASIC_IS_DCE3(rdev)) {
                switch (hpd) {
                case RADEON_HPD_1:
                        if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_2:
                        if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_3:
                        if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_4:
                        if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                /* DCE 3.2 */
                case RADEON_HPD_5:
                        if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_6:
                        if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                default:
                        break;
                }
        } else {
                switch (hpd) {
                case RADEON_HPD_1:
                        if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_2:
                        if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_3:
                        if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                default:
                        break;
                }
        }
        return connected;
}

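/*
 * Set the HPD interrupt polarity from the current sense state: the polarity
 * bit is cleared while a panel is connected and set while disconnected, so
 * the next hot-plug interrupt fires on the opposite transition.
 */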
void r600_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = r600_hpd_sense(rdev, hpd);

        if (ASIC_IS_DCE3(rdev)) {
                switch (hpd) {
                case RADEON_HPD_1:
                        tmp = RREG32(DC_HPD1_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD1_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_2:
                        tmp = RREG32(DC_HPD2_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD2_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_3:
                        tmp = RREG32(DC_HPD3_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD3_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_4:
                        tmp = RREG32(DC_HPD4_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD4_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_5:
                        tmp = RREG32(DC_HPD5_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                        break;
                /* DCE 3.2 */
                case RADEON_HPD_6:
                        tmp = RREG32(DC_HPD6_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                        break;
                default:
                        break;
                }
        } else {
                switch (hpd) {
                case RADEON_HPD_1:
                        tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_2:
                        tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_3:
                        tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
                        break;
                default:
                        break;
                }
        }
}

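/*
 * Walk the connector list, enable the HPD pin each connector reports, and
 * mark the matching rdev->irq.hpd[] slot so r600_irq_set() can unmask it.
 * On DCE3 parts the control write also programs connection/RX interrupt
 * timers (presumably debounce intervals); pre-DCE3 parts only get the
 * detect-enable bit.
 */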
void r600_hpd_init(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        if (ASIC_IS_DCE3(rdev)) {
                u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
                if (ASIC_IS_DCE32(rdev))
                        tmp |= DC_HPDx_EN;
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HPD1_CONTROL, tmp);
                                rdev->irq.hpd[0] = true;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HPD2_CONTROL, tmp);
                                rdev->irq.hpd[1] = true;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HPD3_CONTROL, tmp);
                                rdev->irq.hpd[2] = true;
                                break;
                        case RADEON_HPD_4:
                                WREG32(DC_HPD4_CONTROL, tmp);
                                rdev->irq.hpd[3] = true;
                                break;
                        /* DCE 3.2 */
                        case RADEON_HPD_5:
                                WREG32(DC_HPD5_CONTROL, tmp);
                                rdev->irq.hpd[4] = true;
                                break;
                        case RADEON_HPD_6:
                                WREG32(DC_HPD6_CONTROL, tmp);
                                rdev->irq.hpd[5] = true;
                                break;
                        default:
                                break;
                        }
                }
        } else {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[0] = true;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[1] = true;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[2] = true;
                                break;
                        default:
                                break;
                        }
                }
        }
        if (rdev->irq.installed)
                r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        if (ASIC_IS_DCE3(rdev)) {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HPD1_CONTROL, 0);
                                rdev->irq.hpd[0] = false;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HPD2_CONTROL, 0);
                                rdev->irq.hpd[1] = false;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HPD3_CONTROL, 0);
                                rdev->irq.hpd[2] = false;
                                break;
                        case RADEON_HPD_4:
                                WREG32(DC_HPD4_CONTROL, 0);
                                rdev->irq.hpd[3] = false;
                                break;
                        /* DCE 3.2 */
                        case RADEON_HPD_5:
                                WREG32(DC_HPD5_CONTROL, 0);
                                rdev->irq.hpd[4] = false;
                                break;
                        case RADEON_HPD_6:
                                WREG32(DC_HPD6_CONTROL, 0);
                                rdev->irq.hpd[5] = false;
                                break;
                        default:
                                break;
                        }
                }
        } else {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
                                rdev->irq.hpd[0] = false;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
                                rdev->irq.hpd[1] = false;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
                                rdev->irq.hpd[2] = false;
                                break;
                        default:
                                break;
                        }
                }
        }
}

/*
 * R600 PCIE GART
 */
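/*
 * The TLB flush is a request/response handshake: program the GTT range into
 * the VM_CONTEXT0 invalidation registers, issue REQUEST_TYPE(1), then poll
 * VM_CONTEXT0_REQUEST_RESPONSE until a response code appears; a value of 2
 * seems to indicate failure, any other non-zero value success.
 */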
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        unsigned i;
        u32 tmp;

        /* flush hdp cache so updates hit vram */
        WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

        WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
        WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
                tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
                if (tmp == 2) {
                        printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
                        return;
                }
                if (tmp) {
                        return;
                }
                udelay(1);
        }
}

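/*
 * Each GART page table entry is 8 bytes, hence the num_gpu_pages * 8 table
 * sizing below; the table itself is allocated in VRAM.
 */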
int r600_pcie_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj) {
                WARN(1, "R600 PCIE GART already initialized.\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
        return radeon_gart_table_vram_alloc(rdev);
}

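/*
 * Enable sequence: pin the page table, program the L2 cache and L1 TLB
 * controls, point VM context 0 at the table and its GTT range, route
 * protection faults to the dummy page, then flush the TLB so the new
 * mappings take effect.
 */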
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int r, i;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
              SYSTEM_ACCESS_MODE_NOT_IN_SYS |
              EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
              ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
        for (i = 1; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        r600_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;
        int i, r;

        /* Disable all tables */
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        /* Disable L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
               EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup L1 TLB control */
        tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
              ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
        r600_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int i;

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
              SYSTEM_ACCESS_MODE_NOT_IN_SYS |
              EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
              ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

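/*
 * Poll SRBM_STATUS for up to rdev->usec_timeout microseconds; the 0x3F00
 * mask appears to cover the memory-controller busy bits, so the MC is idle
 * once they all read zero.
 */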
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        u32 tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -1;
}

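/*
 * r600_mc_program() reprograms the memory-controller apertures inside an
 * rv515_mc_stop()/rv515_mc_resume() bracket, so that display requests are
 * quiesced while the framebuffer location is being moved.
 */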
static void r600_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 tmp;
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }
        WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        /* Lockout access through VGA aperture (doesn't exist before R600) */
        WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
        /* Update configuration */
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->mc.vram_start < rdev->mc.gtt_start) {
                        /* VRAM before AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                               rdev->mc.vram_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                               rdev->mc.gtt_end >> 12);
                } else {
                        /* VRAM after AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                               rdev->mc.gtt_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                               rdev->mc.vram_end >> 12);
                }
        } else {
                WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
        }
        WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
        tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
        WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
                WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
        } else {
                WREG32(MC_VM_AGP_BASE, 0);
                WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
                WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
        }
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        rv515_mc_resume(rdev, &save);
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it from overwriting our objects */
        rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it occupies in the
 * CPU (PCI) address space, as some GPUs seem to have issues when VRAM is
 * reprogrammed to a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
        u64 size_bf, size_af;

        if (mc->mc_vram_size > 0xE0000000) {
                /* leave room for at least 512M GTT */
                dev_warn(rdev->dev, "limiting VRAM\n");
                mc->real_vram_size = 0xE0000000;
                mc->mc_vram_size = 0xE0000000;
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
                size_af = 0xFFFFFFFF - mc->gtt_end + 1;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_bf;
                                mc->mc_vram_size = size_bf;
                        }
                        mc->vram_start = mc->gtt_start - mc->mc_vram_size;
                } else {
                        if (mc->mc_vram_size > size_af) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_af;
                                mc->mc_vram_size = size_af;
                        }
                        mc->vram_start = mc->gtt_end;
                }
                mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
                dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
                         mc->mc_vram_size >> 20, mc->vram_start,
                         mc->vram_end, mc->real_vram_size >> 20);
        } else {
                u64 base = 0;
                if (rdev->flags & RADEON_IS_IGP)
                        base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
                radeon_vram_location(rdev, &rdev->mc, base);
                radeon_gtt_location(rdev, mc);
        }
}

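/*
 * The VRAM width below is channels * channel size; e.g. a CHMAP value of 1
 * (two channels) with 32-bit channels yields a 64-bit memory bus.
 */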
int r600_mc_init(struct radeon_device *rdev)
{
        fixed20_12 a;
        u32 tmp;
        int chansize, numchan;

        /* Get VRAM information */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
                chansize = 64;
        } else {
                chansize = 32;
        }
        tmp = RREG32(CHMAP);
        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could the aperture size report 0? */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        /* FIXME remove this once we support unmappable VRAM */
        if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
                rdev->mc.mc_vram_size = rdev->mc.aper_size;
                rdev->mc.real_vram_size = rdev->mc.aper_size;
        }
        r600_vram_gtt_location(rdev, &rdev->mc);
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
        if (rdev->flags & RADEON_IS_IGP)
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, and it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
                S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
                S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
                S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
                S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
                S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
                S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
                S_008010_GUI_ACTIVE(1);
        u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
                S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
                S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
                S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
                S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
                S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
        u32 srbm_reset = 0;
        u32 tmp;

        dev_info(rdev->dev, "GPU soft reset\n");
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                 RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                 RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                 RREG32(R_000E50_SRBM_STATUS));
        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        /* Disable CP parsing/prefetching */
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
        /* Check if any of the rendering blocks is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
                tmp = S_008020_SOFT_RESET_CR(1) |
                        S_008020_SOFT_RESET_DB(1) |
                        S_008020_SOFT_RESET_CB(1) |
                        S_008020_SOFT_RESET_PA(1) |
                        S_008020_SOFT_RESET_SC(1) |
                        S_008020_SOFT_RESET_SMX(1) |
                        S_008020_SOFT_RESET_SPI(1) |
                        S_008020_SOFT_RESET_SX(1) |
                        S_008020_SOFT_RESET_SH(1) |
                        S_008020_SOFT_RESET_TC(1) |
                        S_008020_SOFT_RESET_TA(1) |
                        S_008020_SOFT_RESET_VC(1) |
                        S_008020_SOFT_RESET_VGT(1);
                dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(R_008020_GRBM_SOFT_RESET, tmp);
                (void)RREG32(R_008020_GRBM_SOFT_RESET);
                udelay(50);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
                (void)RREG32(R_008020_GRBM_SOFT_RESET);
        }
        /* Reset CP (we always reset CP) */
        tmp = S_008020_SOFT_RESET_CP(1);
        dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
        (void)RREG32(R_008020_GRBM_SOFT_RESET);
        udelay(50);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
        (void)RREG32(R_008020_GRBM_SOFT_RESET);
        /* Reset other GPU blocks if necessary */
        if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
        if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
        if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_IH(1);
        if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
        if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_MC(1);
        if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_MC(1);
        if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_MC(1);
        if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_MC(1);
        if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_MC(1);
        if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
        if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
        if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
        dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
        WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        udelay(50);
        WREG32(R_000E60_SRBM_SOFT_RESET, 0);
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        udelay(50);
        WREG32(R_000E60_SRBM_SOFT_RESET, 0);
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
        udelay(50);
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                 RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                 RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                 RREG32(R_000E50_SRBM_STATUS));
        /* After reset we need to reinit the asic as the GPU often ends up in
         * an incoherent state.
         */
        atom_asic_init(rdev->mode_info.atom_context);
        rv515_mc_resume(rdev, &save);
        return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
        return r600_gpu_soft_reset(rdev);
}

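/*
 * The backend map packs a 2-bit backend id per tile pipe: for each pipe,
 * the next enabled backend (picked round-robin from the non-disabled set)
 * is shifted into bit position swizzle_pipe[pipe] * 2 of the map.
 */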
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
                                             u32 num_backends,
                                             u32 backend_disable_mask)
{
        u32 backend_map = 0;
        u32 enabled_backends_mask;
        u32 enabled_backends_count;
        u32 cur_pipe;
        u32 swizzle_pipe[R6XX_MAX_PIPES];
        u32 cur_backend;
        u32 i;

        if (num_tile_pipes > R6XX_MAX_PIPES)
                num_tile_pipes = R6XX_MAX_PIPES;
        if (num_tile_pipes < 1)
                num_tile_pipes = 1;
        if (num_backends > R6XX_MAX_BACKENDS)
                num_backends = R6XX_MAX_BACKENDS;
        if (num_backends < 1)
                num_backends = 1;

        enabled_backends_mask = 0;
        enabled_backends_count = 0;
        for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
                if (((backend_disable_mask >> i) & 1) == 0) {
                        enabled_backends_mask |= (1 << i);
                        ++enabled_backends_count;
                }
                if (enabled_backends_count == num_backends)
                        break;
        }

        if (enabled_backends_count == 0) {
                enabled_backends_mask = 1;
                enabled_backends_count = 1;
        }
        if (enabled_backends_count != num_backends)
                num_backends = enabled_backends_count;

        memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
        switch (num_tile_pipes) {
        case 1:
                swizzle_pipe[0] = 0;
                break;
        case 2:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                break;
        case 3:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                break;
        case 4:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                swizzle_pipe[3] = 3;
                break;
        case 5:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                swizzle_pipe[3] = 3;
                swizzle_pipe[4] = 4;
                break;
        case 6:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 5;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                break;
        case 7:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 6;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                swizzle_pipe[6] = 5;
                break;
        case 8:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 6;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                swizzle_pipe[6] = 5;
                swizzle_pipe[7] = 7;
                break;
        }

        cur_backend = 0;
        for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
                while (((1 << cur_backend) & enabled_backends_mask) == 0)
                        cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

                backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

                cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
        }

        return backend_map;
}

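/* Plain popcount over a 32-bit mask; equivalent to hweight32(). */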
int r600_count_pipe_bits(uint32_t val)
{
        int i, ret = 0;

        for (i = 0; i < 32; i++) {
                ret += val & 1;
                val >>= 1;
        }
        return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
        u32 tiling_config;
        u32 ramcfg;
        u32 backend_map;
        u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
        u32 sq_config;
        u32 sq_gpr_resource_mgmt_1 = 0;
        u32 sq_gpr_resource_mgmt_2 = 0;
        u32 sq_thread_resource_mgmt = 0;
        u32 sq_stack_resource_mgmt_1 = 0;
        u32 sq_stack_resource_mgmt_2 = 0;

        /* FIXME: implement */
        switch (rdev->family) {
        case CHIP_R600:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 8;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 256;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV630:
        case CHIP_RV635:
                rdev->config.r600.max_pipes = 2;
                rdev->config.r600.max_tile_pipes = 2;
                rdev->config.r600.max_simds = 3;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->config.r600.max_pipes = 1;
                rdev->config.r600.max_tile_pipes = 1;
                rdev->config.r600.max_simds = 2;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 4;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 1;
                break;
        case CHIP_RV670:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 4;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 192;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        default:
                break;
        }

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

        /* Setup tiling */
        tiling_config = 0;
        ramcfg = RREG32(RAMCFG);
        switch (rdev->config.r600.max_tile_pipes) {
        case 1:
                tiling_config |= PIPE_TILING(0);
                break;
        case 2:
                tiling_config |= PIPE_TILING(1);
                break;
        case 4:
                tiling_config |= PIPE_TILING(2);
                break;
        case 8:
                tiling_config |= PIPE_TILING(3);
                break;
        default:
                break;
        }
        rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE(0);
        rdev->config.r600.tiling_group_size = 256;
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
                tiling_config |= SAMPLE_SPLIT(3);
        } else {
                tiling_config |= ROW_TILING(tmp);
                tiling_config |= SAMPLE_SPLIT(tmp);
        }
        tiling_config |= BANK_SWAPS(1);

        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
        cc_rb_backend_disable |=
                BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
        cc_gc_shader_pipe_config |=
                INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
        cc_gc_shader_pipe_config |=
                INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

        backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
                                                        (R6XX_MAX_BACKENDS -
                                                         r600_count_pipe_bits((cc_rb_backend_disable &
                                                                               R6XX_MAX_BACKENDS_MASK) >> 16)),
                                                        (cc_rb_backend_disable >> 16));

        tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

        /* Setup pipes */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

        /* Setup some CP states */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
        WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

        WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
                             SYNC_WALKER | SYNC_ALIGNER));
        /* Setup various GPU states */
        if (rdev->family == CHIP_RV670)
                WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

        tmp = RREG32(SX_DEBUG_1);
        tmp |= SMX_EVENT_RELEASE;
        if ((rdev->family > CHIP_R600))
                tmp |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, tmp);

        if (((rdev->family) == CHIP_R600) ||
            ((rdev->family) == CHIP_RV630) ||
            ((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
        } else {
                WREG32(DB_DEBUG, 0);
        }
        WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
                               DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
        WREG32(VGT_NUM_INSTANCES, 0);

        WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

        tmp = RREG32(SQ_MS_FIFO_SIZES);
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                tmp = (CACHE_FIFO_SIZE(0xa) |
                       FETCH_FIFO_HIWATER(0xa) |
                       DONE_FIFO_HIWATER(0xe0) |
                       ALU_UPDATE_FIFO_HIWATER(0x8));
        } else if (((rdev->family) == CHIP_R600) ||
                   ((rdev->family) == CHIP_RV630)) {
                tmp &= ~DONE_FIFO_HIWATER(0xff);
                tmp |= DONE_FIFO_HIWATER(0x4);
        }
        WREG32(SQ_MS_FIFO_SIZES, tmp);

        /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
         * should be adjusted as needed by the 2D/3D drivers. This just sets default values
         */
        sq_config = RREG32(SQ_CONFIG);
        sq_config &= ~(PS_PRIO(3) |
                       VS_PRIO(3) |
                       GS_PRIO(3) |
                       ES_PRIO(3));
        sq_config |= (DX9_CONSTS |
                      VC_ENABLE |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        if ((rdev->family) == CHIP_R600) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
                                          NUM_VS_GPRS(124) |
                                          NUM_CLAUSE_TEMP_GPRS(4));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
                                          NUM_ES_GPRS(0));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
                                           NUM_VS_THREADS(48) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(4));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
                                            NUM_VS_STACK_ENTRIES(128));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
                                            NUM_ES_STACK_ENTRIES(0));
        } else if (((rdev->family) == CHIP_RV610) ||
                   ((rdev->family) == CHIP_RV620) ||
                   ((rdev->family) == CHIP_RS780) ||
                   ((rdev->family) == CHIP_RS880)) {
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if (((rdev->family) == CHIP_RV630) ||
                   ((rdev->family) == CHIP_RV635)) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
                                          NUM_ES_GPRS(18));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if ((rdev->family) == CHIP_RV670) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
                                            NUM_VS_STACK_ENTRIES(64));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
                                            NUM_ES_STACK_ENTRIES(64));
        }

        WREG32(SQ_CONFIG, sq_config);
        WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
        WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
        WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
        WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
        WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
        } else {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
        }
  1211. /* More default values. 2D/3D driver should adjust as needed */
  1212. WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
  1213. S1_X(0x4) | S1_Y(0xc)));
  1214. WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
  1215. S1_X(0x2) | S1_Y(0x2) |
  1216. S2_X(0xa) | S2_Y(0x6) |
  1217. S3_X(0x6) | S3_Y(0xa)));
  1218. WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
  1219. S1_X(0x4) | S1_Y(0xc) |
  1220. S2_X(0x1) | S2_Y(0x6) |
  1221. S3_X(0xa) | S3_Y(0xe)));
  1222. WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
  1223. S5_X(0x0) | S5_Y(0x0) |
  1224. S6_X(0xb) | S6_Y(0x4) |
  1225. S7_X(0x7) | S7_Y(0x8)));
  1226. WREG32(VGT_STRMOUT_EN, 0);
  1227. tmp = rdev->config.r600.max_pipes * 16;
  1228. switch (rdev->family) {
  1229. case CHIP_RV610:
  1230. case CHIP_RV620:
  1231. case CHIP_RS780:
  1232. case CHIP_RS880:
  1233. tmp += 32;
  1234. break;
  1235. case CHIP_RV670:
  1236. tmp += 128;
  1237. break;
  1238. default:
  1239. break;
  1240. }
  1241. if (tmp > 256) {
  1242. tmp = 256;
  1243. }
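	/* worked example: a part with 4 pipes starts at 64; on RV670 the
	 * extra 128 gives 192, and anything above 256 is clamped */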
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect registers accessor
 */
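/*
 * PCIE port registers are reached through an index/data pair: the
 * register offset goes into PCIE_PORT_INDEX and the payload moves
 * through PCIE_PORT_DATA.  The dummy reads below appear to be there to
 * post the writes, so the index is latched before the data access.
 */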
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	default:
		BUG();
	}
	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}
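	/* note: each r6xx PM4 (ME) microcode entry is three dwords, hence the
	 * 12-byte multiplier here and the matching "PM4_UCODE_SIZE * 3" loop
	 * when the ME ucode is written out in r600_cp_load_microcode() */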
	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
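	/* the PACKET3 count field is (data dwords - 1), so ME_INITIALIZE
	 * with a count of 5 is followed by six data dwords: seven ring
	 * dwords total, matching the radeon_ring_lock(rdev, 7) above */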
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
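	/* e.g. a requested 1MB ring: drm_order(1MB / 8) = 17, so the size
	 * becomes (1 << 18) * 4 = 1MB again; sizes that are not a power of
	 * two are rounded up to the next one */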
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
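/*
 * Ring test: write a token (0xCAFEDEAD) into a scratch register from
 * the CPU, queue a SET_CONFIG_REG packet that overwrites it with
 * 0xDEADBEEF, then poll until the GPU write lands or the usec timeout
 * expires.
 */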
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
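	/* layout of the single write-back page, as programmed below: scratch
	 * register writeback starts at offset 0 (SCRATCH_ADDR takes a
	 * 256-byte aligned address) and the CP read pointer writes back at
	 * offset 1024 */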
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP; it handles the interrupts,
	 * timestamps and events */
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
	      RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);

	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike on r500
	 * hw, posting will perform the necessary tasks to bring the GPU
	 * back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	int r;

	/* FIXME: we should wait for the ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin the shader bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}
/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call
 * ASIC-specific functions. This should also allow us to remove a bunch
 * of callback functions like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
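	/* INDIRECT_BUFFER carries three data dwords: the dword-aligned low
	 * half of the IB address (hence the 0xFFFFFFFC mask), its 8-bit high
	 * half, and the IB length in dwords */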
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
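	/* pad the IB out to 16 dwords with type-2 NOP filler packets */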
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much the
 * same as the CP ring buffer, but in reverse. Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes. As the host irq handler processes interrupts,
 * it increments the rptr. When the rptr catches up with the wptr, all
 * the current interrupts have been processed.
 */
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
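	/* e.g. the default 64KB IH ring: drm_order(64KB / 4) = 14, so
	 * (1 << 14) * 4 = 64KB; the ptr_mask below relies on the size being
	 * a power of two */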
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
static void r600_rlc_stop(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_RV770) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

static void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		/* preserve only the HPD polarity bits */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing the
		 * interrupts from the last not-overwritten vector (wptr
		 * + 16). Hopefully this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
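/*
 * Example: a D1 vblank arrives as the 128-bit entry
 *   dw0 = 0x00000001 (src_id 1), dw1 = 0x00000000 (src_data 0),
 * with dw2/dw3 reserved, which is why the handler below reads ring[i]
 * and ring[i + 1] and then advances rptr by 16 bytes per vector.
 */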
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
  2561. break;
  2562. default:
  2563. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  2564. break;
  2565. }
  2566. break;
  2567. case 19: /* HPD/DAC hotplug */
  2568. switch (src_data) {
  2569. case 0:
  2570. if (disp_int & DC_HPD1_INTERRUPT) {
  2571. disp_int &= ~DC_HPD1_INTERRUPT;
  2572. queue_hotplug = true;
  2573. DRM_DEBUG("IH: HPD1\n");
  2574. }
  2575. break;
  2576. case 1:
  2577. if (disp_int & DC_HPD2_INTERRUPT) {
  2578. disp_int &= ~DC_HPD2_INTERRUPT;
  2579. queue_hotplug = true;
  2580. DRM_DEBUG("IH: HPD2\n");
  2581. }
  2582. break;
  2583. case 4:
  2584. if (disp_int_cont & DC_HPD3_INTERRUPT) {
  2585. disp_int_cont &= ~DC_HPD3_INTERRUPT;
  2586. queue_hotplug = true;
  2587. DRM_DEBUG("IH: HPD3\n");
  2588. }
  2589. break;
  2590. case 5:
  2591. if (disp_int_cont & DC_HPD4_INTERRUPT) {
  2592. disp_int_cont &= ~DC_HPD4_INTERRUPT;
  2593. queue_hotplug = true;
  2594. DRM_DEBUG("IH: HPD4\n");
  2595. }
  2596. break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}