/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"

#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);

        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

        /* update the scanout addresses */
        if (radeon_crtc->crtc_id) {
                WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
                WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        } else {
                WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
                WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        }
        WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32)crtc_base);
        WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32)crtc_base);

        /* Wait for update_pending to go high. */
        while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

        /* Unlock the lock, so double-buffering can take place inside vblank */
        tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

        /* Return current update_pending status: */
        return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
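
/*
 * Note on the flip handshake above (descriptive, not additional driver
 * code): GRPH_UPDATE_LOCK keeps the display controller from latching a
 * new surface address mid-update. The driver writes both primary and
 * secondary base addresses while locked, busy-waits for
 * SURFACE_UPDATE_PENDING to assert, then drops the lock so the flip
 * completes on the next vblank. Callers poll the returned pending bit
 * to detect completion.
 */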
/* get temperature in millidegrees */
u32 rv770_get_temp(struct radeon_device *rdev)
{
        u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
        u32 actual_temp = 0;

        if ((temp >> 9) & 1)
                actual_temp = 0;
        else
                actual_temp = (temp >> 1) & 0xff;

        return actual_temp * 1000;
}
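
/*
 * Worked example of the decoding above (an illustration, assuming the
 * field layout implied by the shifts): bit 9 of the ASIC_T field flags
 * an invalid/negative reading and bits [8:1] hold degrees C, so a raw
 * field value of 0x5a decodes to (0x5a >> 1) & 0xff = 45 C, returned
 * as 45000 millidegrees.
 */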
void rv770_pm_misc(struct radeon_device *rdev)
{
        int req_ps_idx = rdev->pm.requested_power_state_index;
        int req_cm_idx = rdev->pm.requested_clock_mode_index;
        struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
                if (voltage->voltage != rdev->pm.current_vddc) {
                        radeon_atom_set_voltage(rdev, voltage->voltage);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
                }
        }
}
/*
 * GART
 */
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int r, i;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
                EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
        for (i = 1; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        r600_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}
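
/*
 * Sequencing note for the GART bring-up above (descriptive): the page
 * table is pinned in VRAM and repopulated via radeon_gart_restore()
 * before VM context 0 is enabled, the start/end/base registers take
 * page-frame numbers (hence the >> 12 shifts), and unmapped accesses
 * fall through to the dummy page so stray translations fault
 * harmlessly. The remaining VM contexts are cleared because only
 * context 0 is used.
 */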
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;
        int i, r;

        /* Disable all tables */
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
        /* Setup TLB control */
        tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}

void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rv770_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
}
void rv770_agp_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int i;

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
                EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
static void rv770_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 tmp;
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }
        /* r7xx hw bug. Read from HDP_DEBUG1 rather
         * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
         */
        tmp = RREG32(HDP_DEBUG1);

        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        /* Lockout access through VGA aperture */
        WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
        /* Update configuration */
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->mc.vram_start < rdev->mc.gtt_start) {
                        /* VRAM before AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                               rdev->mc.vram_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                               rdev->mc.gtt_end >> 12);
                } else {
                        /* VRAM after AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                               rdev->mc.gtt_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                               rdev->mc.vram_end >> 12);
                }
        } else {
                WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                       rdev->mc.vram_start >> 12);
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                       rdev->mc.vram_end >> 12);
        }
        WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
        tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
        WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
                WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
        } else {
                WREG32(MC_VM_AGP_BASE, 0);
                WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
                WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
        }
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        rv515_mc_resume(rdev, &save);
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it overwriting our objects */
        rv515_vga_render_disable(rdev);
}
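
/*
 * Ordering note (descriptive): rv770_mc_program() stops the display
 * controller via rv515_mc_stop() and waits for the memory controller
 * to idle before moving the framebuffer and system apertures, since
 * reprogramming MC_VM_FB_LOCATION while clients are active can hang
 * the chip. On non-AGP boards the AGP aperture registers are parked
 * at harmless defaults so the aperture decodes nothing.
 */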
/*
 * CP.
 */
void r700_cp_stop(struct radeon_device *rdev)
{
        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
        WREG32(SCRATCH_UMSK, 0);
}

static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->me_fw || !rdev->pfp_fw)
                return -EINVAL;

        r700_cp_stop(rdev);
        WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

        /* Reset cp */
        WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(GRBM_SOFT_RESET, 0);

        fw_data = (const __be32 *)rdev->pfp_fw->data;
        WREG32(CP_PFP_UCODE_ADDR, 0);
        for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
                WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
        WREG32(CP_PFP_UCODE_ADDR, 0);

        fw_data = (const __be32 *)rdev->me_fw->data;
        WREG32(CP_ME_RAM_WADDR, 0);
        for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
                WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

        WREG32(CP_PFP_UCODE_ADDR, 0);
        WREG32(CP_ME_RAM_WADDR, 0);
        WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
}
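
/*
 * Upload pattern used above (descriptive): the PFP and ME ucode RAMs
 * are written through address/data register pairs. Writing 0 to the
 * address register rewinds the autoincrementing write pointer, each
 * data write stores one big-endian dword (hence be32_to_cpup() on the
 * firmware image), and the pointers are reset afterwards so the
 * engines fetch from the top once unhalted.
 */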
void r700_cp_fini(struct radeon_device *rdev)
{
        r700_cp_stop(rdev);
        radeon_ring_fini(rdev);
}
/*
 * Core functions
 */
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
                                             u32 num_tile_pipes,
                                             u32 num_backends,
                                             u32 backend_disable_mask)
{
        u32 backend_map = 0;
        u32 enabled_backends_mask;
        u32 enabled_backends_count;
        u32 cur_pipe;
        u32 swizzle_pipe[R7XX_MAX_PIPES];
        u32 cur_backend;
        u32 i;
        bool force_no_swizzle;

        if (num_tile_pipes > R7XX_MAX_PIPES)
                num_tile_pipes = R7XX_MAX_PIPES;
        if (num_tile_pipes < 1)
                num_tile_pipes = 1;
        if (num_backends > R7XX_MAX_BACKENDS)
                num_backends = R7XX_MAX_BACKENDS;
        if (num_backends < 1)
                num_backends = 1;

        enabled_backends_mask = 0;
        enabled_backends_count = 0;
        for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
                if (((backend_disable_mask >> i) & 1) == 0) {
                        enabled_backends_mask |= (1 << i);
                        ++enabled_backends_count;
                }
                if (enabled_backends_count == num_backends)
                        break;
        }

        if (enabled_backends_count == 0) {
                enabled_backends_mask = 1;
                enabled_backends_count = 1;
        }

        if (enabled_backends_count != num_backends)
                num_backends = enabled_backends_count;

        switch (rdev->family) {
        case CHIP_RV770:
        case CHIP_RV730:
                force_no_swizzle = false;
                break;
        case CHIP_RV710:
        case CHIP_RV740:
        default:
                force_no_swizzle = true;
                break;
        }

        memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
        switch (num_tile_pipes) {
        case 1:
                swizzle_pipe[0] = 0;
                break;
        case 2:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                break;
        case 3:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 1;
                }
                break;
        case 4:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 3;
                        swizzle_pipe[3] = 1;
                }
                break;
        case 5:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 1;
                        swizzle_pipe[4] = 3;
                }
                break;
        case 6:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                        swizzle_pipe[5] = 5;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 5;
                        swizzle_pipe[4] = 3;
                        swizzle_pipe[5] = 1;
                }
                break;
        case 7:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                        swizzle_pipe[5] = 5;
                        swizzle_pipe[6] = 6;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 6;
                        swizzle_pipe[4] = 3;
                        swizzle_pipe[5] = 1;
                        swizzle_pipe[6] = 5;
                }
                break;
        case 8:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                        swizzle_pipe[5] = 5;
                        swizzle_pipe[6] = 6;
                        swizzle_pipe[7] = 7;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 6;
                        swizzle_pipe[4] = 3;
                        swizzle_pipe[5] = 1;
                        swizzle_pipe[6] = 7;
                        swizzle_pipe[7] = 5;
                }
                break;
        }

        cur_backend = 0;
        for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
                while (((1 << cur_backend) & enabled_backends_mask) == 0)
                        cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;

                backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

                cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
        }

        return backend_map;
}
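
/*
 * Worked example for the mapping above (an illustration): with 4 tile
 * pipes, 2 enabled backends (mask 0x3) and swizzling in effect, the
 * pipe order is 0, 2, 3, 1 and the backends alternate 0, 1, 0, 1.
 * Packing two bits of backend id per pipe gives
 *   backend_map = (0 << 0) | (1 << 4) | (0 << 6) | (1 << 2) = 0x14.
 */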
static void rv770_program_channel_remap(struct radeon_device *rdev)
{
        u32 tcp_chan_steer, mc_shared_chremap, tmp;
        bool force_no_swizzle;

        switch (rdev->family) {
        case CHIP_RV770:
        case CHIP_RV730:
                force_no_swizzle = false;
                break;
        case CHIP_RV710:
        case CHIP_RV740:
        default:
                force_no_swizzle = true;
                break;
        }

        tmp = RREG32(MC_SHARED_CHMAP);
        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 0:
        case 1:
        default:
                /* default mapping */
                mc_shared_chremap = 0x00fac688;
                break;
        case 2:
        case 3:
                if (force_no_swizzle)
                        mc_shared_chremap = 0x00fac688;
                else
                        mc_shared_chremap = 0x00bbc298;
                break;
        }

        if (rdev->family == CHIP_RV740)
                tcp_chan_steer = 0x00ef2a60;
        else
                tcp_chan_steer = 0x00fac688;

        WREG32(TCP_CHAN_STEER, tcp_chan_steer);
        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void rv770_gpu_init(struct radeon_device *rdev)
{
        int i, j, num_qd_pipes;
        u32 ta_aux_cntl;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
        u32 db_debug3;
        u32 num_gs_verts_per_thread;
        u32 vgt_gs_per_es;
        u32 gs_prim_buffer_depth = 0;
        u32 sq_ms_fifo_sizes;
        u32 sq_config;
        u32 sq_thread_resource_mgmt;
        u32 hdp_host_path_cntl;
        u32 sq_dyn_gpr_size_simd_ab_0;
        u32 backend_map;
        u32 gb_tiling_config = 0;
        u32 cc_rb_backend_disable = 0;
        u32 cc_gc_shader_pipe_config = 0;
        u32 mc_arb_ramcfg;
        u32 db_debug4;

        /* setup chip specs */
        switch (rdev->family) {
        case CHIP_RV770:
                rdev->config.rv770.max_pipes = 4;
                rdev->config.rv770.max_tile_pipes = 8;
                rdev->config.rv770.max_simds = 10;
                rdev->config.rv770.max_backends = 4;
                rdev->config.rv770.max_gprs = 256;
                rdev->config.rv770.max_threads = 248;
                rdev->config.rv770.max_stack_entries = 512;
                rdev->config.rv770.max_hw_contexts = 8;
                rdev->config.rv770.max_gs_threads = 16 * 2;
                rdev->config.rv770.sx_max_export_size = 128;
                rdev->config.rv770.sx_max_export_pos_size = 16;
                rdev->config.rv770.sx_max_export_smx_size = 112;
                rdev->config.rv770.sq_num_cf_insts = 2;
                rdev->config.rv770.sx_num_of_sets = 7;
                rdev->config.rv770.sc_prim_fifo_size = 0xF9;
                rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
                break;
        case CHIP_RV730:
                rdev->config.rv770.max_pipes = 2;
                rdev->config.rv770.max_tile_pipes = 4;
                rdev->config.rv770.max_simds = 8;
                rdev->config.rv770.max_backends = 2;
                rdev->config.rv770.max_gprs = 128;
                rdev->config.rv770.max_threads = 248;
                rdev->config.rv770.max_stack_entries = 256;
                rdev->config.rv770.max_hw_contexts = 8;
                rdev->config.rv770.max_gs_threads = 16 * 2;
                rdev->config.rv770.sx_max_export_size = 256;
                rdev->config.rv770.sx_max_export_pos_size = 32;
                rdev->config.rv770.sx_max_export_smx_size = 224;
                rdev->config.rv770.sq_num_cf_insts = 2;
                rdev->config.rv770.sx_num_of_sets = 7;
                rdev->config.rv770.sc_prim_fifo_size = 0xf9;
                rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
                if (rdev->config.rv770.sx_max_export_pos_size > 16) {
                        rdev->config.rv770.sx_max_export_pos_size -= 16;
                        rdev->config.rv770.sx_max_export_smx_size += 16;
                }
                break;
        case CHIP_RV710:
                rdev->config.rv770.max_pipes = 2;
                rdev->config.rv770.max_tile_pipes = 2;
                rdev->config.rv770.max_simds = 2;
                rdev->config.rv770.max_backends = 1;
                rdev->config.rv770.max_gprs = 256;
                rdev->config.rv770.max_threads = 192;
                rdev->config.rv770.max_stack_entries = 256;
                rdev->config.rv770.max_hw_contexts = 4;
                rdev->config.rv770.max_gs_threads = 8 * 2;
                rdev->config.rv770.sx_max_export_size = 128;
                rdev->config.rv770.sx_max_export_pos_size = 16;
                rdev->config.rv770.sx_max_export_smx_size = 112;
                rdev->config.rv770.sq_num_cf_insts = 1;
                rdev->config.rv770.sx_num_of_sets = 7;
                rdev->config.rv770.sc_prim_fifo_size = 0x40;
                rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
                break;
        case CHIP_RV740:
                rdev->config.rv770.max_pipes = 4;
                rdev->config.rv770.max_tile_pipes = 4;
                rdev->config.rv770.max_simds = 8;
                rdev->config.rv770.max_backends = 4;
                rdev->config.rv770.max_gprs = 256;
                rdev->config.rv770.max_threads = 248;
                rdev->config.rv770.max_stack_entries = 512;
                rdev->config.rv770.max_hw_contexts = 8;
                rdev->config.rv770.max_gs_threads = 16 * 2;
                rdev->config.rv770.sx_max_export_size = 256;
                rdev->config.rv770.sx_max_export_pos_size = 32;
                rdev->config.rv770.sx_max_export_smx_size = 224;
                rdev->config.rv770.sq_num_cf_insts = 2;
                rdev->config.rv770.sx_num_of_sets = 7;
                rdev->config.rv770.sc_prim_fifo_size = 0x100;
                rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
                if (rdev->config.rv770.sx_max_export_pos_size > 16) {
                        rdev->config.rv770.sx_max_export_pos_size -= 16;
                        rdev->config.rv770.sx_max_export_smx_size += 16;
                }
                break;
        default:
                break;
        }
        /* Initialize HDP */
        j = 0;
        for (i = 0; i < 32; i++) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
                j += 0x18;
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

        /* setup tiling, simd, pipe config */
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

        switch (rdev->config.rv770.max_tile_pipes) {
        case 1:
        default:
                gb_tiling_config |= PIPE_TILING(0);
                break;
        case 2:
                gb_tiling_config |= PIPE_TILING(1);
                break;
        case 4:
                gb_tiling_config |= PIPE_TILING(2);
                break;
        case 8:
                gb_tiling_config |= PIPE_TILING(3);
                break;
        }
        rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

        if (rdev->family == CHIP_RV770)
                gb_tiling_config |= BANK_TILING(1);
        else
                gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);

        gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
        if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
                rdev->config.rv770.tiling_group_size = 512;
        else
                rdev->config.rv770.tiling_group_size = 256;
        if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
                gb_tiling_config |= ROW_TILING(3);
                gb_tiling_config |= SAMPLE_SPLIT(3);
        } else {
                gb_tiling_config |=
                        ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
                gb_tiling_config |=
                        SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
        }

        gb_tiling_config |= BANK_SWAPS(1);

        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
        cc_rb_backend_disable |=
                BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);

        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
        cc_gc_shader_pipe_config |=
                INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
        cc_gc_shader_pipe_config |=
                INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);

        if (rdev->family == CHIP_RV740)
                backend_map = 0x28;
        else
                backend_map = r700_get_tile_pipe_to_backend_map(rdev,
                                rdev->config.rv770.max_tile_pipes,
                                (R7XX_MAX_BACKENDS -
                                 r600_count_pipe_bits((cc_rb_backend_disable &
                                         R7XX_MAX_BACKENDS_MASK) >> 16)),
                                (cc_rb_backend_disable >> 16));

        rdev->config.rv770.tile_config = gb_tiling_config;
        gb_tiling_config |= BACKEND_MAP(backend_map);

        WREG32(GB_TILING_CONFIG, gb_tiling_config);
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));

        rv770_program_channel_remap(rdev);

        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
        WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);

        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_TCC_DISABLE, 0);

        num_qd_pipes =
                R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
        /* set HW defaults for 3D engine */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
                                     ROQ_IB2_START(0x2b)));

        WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

        ta_aux_cntl = RREG32(TA_CNTL_AUX);
        WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

        sx_debug_1 = RREG32(SX_DEBUG_1);
        sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, sx_debug_1);

        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
        smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
        smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);

        if (rdev->family != CHIP_RV740)
                WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
                                       GS_FLUSH_CTL(4) |
                                       ACK_FLUSH_CTL(3) |
                                       SYNC_FLUSH_CTL));

        db_debug3 = RREG32(DB_DEBUG3);
        db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
        switch (rdev->family) {
        case CHIP_RV770:
        case CHIP_RV740:
                db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
                break;
        case CHIP_RV710:
        case CHIP_RV730:
        default:
                db_debug3 |= DB_CLK_OFF_DELAY(2);
                break;
        }
        WREG32(DB_DEBUG3, db_debug3);

        if (rdev->family != CHIP_RV770) {
                db_debug4 = RREG32(DB_DEBUG4);
                db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
                WREG32(DB_DEBUG4, db_debug4);
        }

        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));

        WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
                                 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
                                 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

        WREG32(VGT_NUM_INSTANCES, 1);

        WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));

        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

        WREG32(CP_PERFMON_CNTL, 0);

        sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
                            DONE_FIFO_HIWATER(0xe0) |
                            ALU_UPDATE_FIFO_HIWATER(0x8));
        switch (rdev->family) {
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
                sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
                break;
        case CHIP_RV740:
        default:
                sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
                break;
        }
        WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

        /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
         * should be adjusted as needed by the 2D/3D drivers. This just sets default values
         */
        sq_config = RREG32(SQ_CONFIG);
        sq_config &= ~(PS_PRIO(3) |
                       VS_PRIO(3) |
                       GS_PRIO(3) |
                       ES_PRIO(3));
        sq_config |= (DX9_CONSTS |
                      VC_ENABLE |
                      EXPORT_SRC_C |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));
        if (rdev->family == CHIP_RV710)
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;

        WREG32(SQ_CONFIG, sq_config);

        WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
                                        NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
                                        NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));

        WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
                                        NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));

        sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
                                   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
                                   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
        if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
                sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
        else
                sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
        WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

        WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
                                          NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

        WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
                                          NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

        sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
                                     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
                                     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
                                     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));

        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
        WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

        WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
                                          FORCE_EOV_MAX_REZ_CNT(255)));

        if (rdev->family == CHIP_RV710)
                WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
                                                AUTO_INVLD_EN(ES_AND_GS_AUTO)));
        else
                WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
                                                AUTO_INVLD_EN(ES_AND_GS_AUTO)));

        switch (rdev->family) {
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV740:
                gs_prim_buffer_depth = 384;
                break;
        case CHIP_RV710:
                gs_prim_buffer_depth = 128;
                break;
        default:
                break;
        }

        num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
        vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
        /* Max value for this is 256 */
        if (vgt_gs_per_es > 256)
                vgt_gs_per_es = 256;

        WREG32(VGT_ES_PER_GS, 128);
        WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
        WREG32(VGT_GS_PER_VS, 2);

        /* more default values. 2D/3D driver should adjust as needed */
        WREG32(VGT_GS_VERTEX_REUSE, 16);
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
        WREG32(VGT_STRMOUT_EN, 0);
        WREG32(SX_MISC, 0);
        WREG32(PA_SC_MODE_CNTL, 0);
        WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
        WREG32(PA_SC_AA_CONFIG, 0);
        WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
        WREG32(PA_SC_LINE_STIPPLE, 0);
        WREG32(SPI_INPUT_Z, 0);
        WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
        WREG32(CB_COLOR7_FRAG, 0);

        /* clear render buffer base addresses */
        WREG32(CB_COLOR0_BASE, 0);
        WREG32(CB_COLOR1_BASE, 0);
        WREG32(CB_COLOR2_BASE, 0);
        WREG32(CB_COLOR3_BASE, 0);
        WREG32(CB_COLOR4_BASE, 0);
        WREG32(CB_COLOR5_BASE, 0);
        WREG32(CB_COLOR6_BASE, 0);
        WREG32(CB_COLOR7_BASE, 0);

        WREG32(TCP_CNTL, 0);

        hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
        WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                               NUM_CLIP_SEQ(3)));
}
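
/*
 * Summary of rv770_gpu_init() (descriptive): per-family limits are
 * copied into rdev->config.rv770, GB_TILING_CONFIG is derived from the
 * memory controller's RAM configuration (pipes, banks, burst length,
 * rows), disabled render backends and shader pipes are masked off, and
 * the remaining writes establish conservative 3D-engine defaults that
 * the 2D/3D drivers are expected to adjust per-context.
 */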
static int rv770_vram_scratch_init(struct radeon_device *rdev)
{
        int r;
        u64 gpu_addr;

        if (rdev->vram_scratch.robj == NULL) {
                r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
                                        PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                                        &rdev->vram_scratch.robj);
                if (r) {
                        return r;
                }
        }

        r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->vram_scratch.robj,
                          RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->vram_scratch.robj);
                return r;
        }
        r = radeon_bo_kmap(rdev->vram_scratch.robj,
                                (void **)&rdev->vram_scratch.ptr);
        if (r)
                radeon_bo_unpin(rdev->vram_scratch.robj);
        radeon_bo_unreserve(rdev->vram_scratch.robj);

        return r;
}
static void rv770_vram_scratch_fini(struct radeon_device *rdev)
{
        int r;

        if (rdev->vram_scratch.robj == NULL) {
                return;
        }
        r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->vram_scratch.robj);
                radeon_bo_unpin(rdev->vram_scratch.robj);
                radeon_bo_unreserve(rdev->vram_scratch.robj);
        }
        radeon_bo_unref(&rdev->vram_scratch.robj);
}
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
        u64 size_bf, size_af;

        if (mc->mc_vram_size > 0xE0000000) {
                /* leave room for at least 512M GTT */
                dev_warn(rdev->dev, "limiting VRAM\n");
                mc->real_vram_size = 0xE0000000;
                mc->mc_vram_size = 0xE0000000;
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
                size_af = 0xFFFFFFFF - mc->gtt_end + 1;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_bf;
                                mc->mc_vram_size = size_bf;
                        }
                        mc->vram_start = mc->gtt_start - mc->mc_vram_size;
                } else {
                        if (mc->mc_vram_size > size_af) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_af;
                                mc->mc_vram_size = size_af;
                        }
                        mc->vram_start = mc->gtt_end;
                }
                mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
                dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
                                mc->mc_vram_size >> 20, mc->vram_start,
                                mc->vram_end, mc->real_vram_size >> 20);
        } else {
                radeon_vram_location(rdev, &rdev->mc, 0);
                rdev->mc.gtt_base_align = 0;
                radeon_gtt_location(rdev, mc);
        }
}
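
/*
 * Placement example for the AGP branch above (an illustration): with
 * the GTT aperture starting at gtt_start = 0xD0000000 and more room
 * below it than above, a 512 MB VRAM is placed directly against the
 * aperture at
 *   vram_start = 0xD0000000 - 0x20000000 = 0xB0000000;
 * VRAM goes on whichever side of the aperture has more space and is
 * clamped when it would not fit.
 */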
int rv770_mc_init(struct radeon_device *rdev)
{
        u32 tmp;
        int chansize, numchan;

        /* Get VRAM information */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(MC_ARB_RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
                chansize = 64;
        } else {
                chansize = 32;
        }
        tmp = RREG32(MC_SHARED_CHMAP);
        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0? */
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r700_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);

        return 0;
}
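
/*
 * Example of the width calculation above (an illustration): a board
 * reporting NOOFCHAN = 2 (four memory channels) with a 64-bit channel
 * size yields rdev->mc.vram_width = 4 * 64 = 256 bits.
 */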
static int rv770_startup(struct radeon_device *rdev)
{
        int r;

        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                r = r600_init_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load firmware!\n");
                        return r;
                }
        }

        rv770_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                rv770_agp_enable(rdev);
        } else {
                r = rv770_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        r = rv770_vram_scratch_init(rdev);
        if (r)
                return r;
        rv770_gpu_init(rdev);
        r = r600_blit_init(rdev);
        if (r) {
                r600_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
                radeon_irq_kms_fini(rdev);
                return r;
        }
        r600_irq_set(rdev);

        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
                return r;
        r = rv770_cp_load_microcode(rdev);
        if (r)
                return r;
        r = r600_cp_resume(rdev);
        if (r)
                return r;

        return 0;
}
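
/*
 * Bring-up order in rv770_startup() (descriptive): microcode load,
 * memory-controller programming, GART (or AGP) enable, VRAM scratch
 * buffer, GPU init, blitter (optional, with a memcpy fallback),
 * writeback buffer, interrupts, then the CP ring; each step relies on
 * the address space and engines configured by the steps before it.
 */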
int rv770_resume(struct radeon_device *rdev)
{
        int r;

        /* Do not reset the GPU before posting; on rv770 hw, unlike on
         * r500 hw, posting performs the tasks needed to bring the GPU
         * back into good shape.
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);

        r = rv770_startup(rdev);
        if (r) {
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }

        r = r600_ib_test(rdev);
        if (r) {
                DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                return r;
        }

        r = r600_audio_init(rdev);
        if (r) {
                dev_err(rdev->dev, "radeon: audio init failed\n");
                return r;
        }

        return r;
}
int rv770_suspend(struct radeon_device *rdev)
{
        int r;

        r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
        /* unpin shaders bo */
        if (rdev->r600_blit.shader_obj) {
                r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_unpin(rdev->r600_blit.shader_obj);
                        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                }
        }
        return 0;
}
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call ASIC-specific functions. This should also allow us to remove
 * a bunch of callbacks like vram_info.
 */
int rv770_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
        /* This doesn't do much */
        r = radeon_gem_init(rdev);
        if (r)
                return r;
        /* Read BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        /* Must be an ATOMBIOS */
        if (!rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
                return -EINVAL;
        }
        r = radeon_atombios_init(rdev);
        if (r)
                return r;
        /* Post card if necessary */
        if (!r600_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r)
                        radeon_agp_disable(rdev);
        }
        r = rv770_mc_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        rdev->cp.ring_obj = NULL;
        r600_ring_init(rdev, 1024 * 1024);
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
        rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
                        dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                        rdev->accel_working = false;
                } else {
                        r = r600_ib_test(rdev);
                        if (r) {
                                dev_err(rdev->dev, "IB test failed (%d).\n", r);
                                rdev->accel_working = false;
                        }
                }
        }
        r = r600_audio_init(rdev);
        if (r) {
                dev_err(rdev->dev, "radeon: audio init failed\n");
                return r;
        }
        return 0;
}
void rv770_fini(struct radeon_device *rdev)
{
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        rv770_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
        radeon_dummy_page_fini(rdev);
}