r520.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */

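/*
 * Poll the memory controller status register until the MC reports idle.
 * Returns 0 once R520_MC_STATUS_IDLE is set, or -1 if the MC is still
 * busy after rdev->usec_timeout one-microsecond polls. Called below by
 * r520_gpu_init() and r520_mc_program() before they touch MC state.
 */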
int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32_MC(R520_MC_STATUS);
                if (tmp & R520_MC_STATUS_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

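/*
 * Basic engine setup: disable VGA rendering, apply the GB_FIFO_SIZE2
 * workaround for RV530, run the common r420 pipe init, then derive a
 * value from DST_PIPE_CONFIG and GB_PIPE_SELECT and write it to PLL
 * register 0x000D. The register map below documents the registers and
 * bit fields involved.
 */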
static void r520_gpu_init(struct radeon_device *rdev)
{
        unsigned pipe_select_current, gb_pipe_select, tmp;

        rv515_vga_render_disable(rdev);
        /*
         * DST_PIPE_CONFIG              0x170C
         * GB_TILE_CONFIG               0x4018
         * GB_FIFO_SIZE                 0x4024
         * GB_PIPE_SELECT               0x402C
         * GB_PIPE_SELECT2              0x4124
         *      Z_PIPE_SHIFT            0
         *      Z_PIPE_MASK             0x000000003
         * GB_FIFO_SIZE2                0x4128
         *      SC_SFIFO_SIZE_SHIFT     0
         *      SC_SFIFO_SIZE_MASK      0x000000003
         *      SC_MFIFO_SIZE_SHIFT     2
         *      SC_MFIFO_SIZE_MASK      0x00000000C
         *      FG_SFIFO_SIZE_SHIFT     4
         *      FG_SFIFO_SIZE_MASK      0x000000030
         *      ZB_MFIFO_SIZE_SHIFT     6
         *      ZB_MFIFO_SIZE_MASK      0x0000000C0
         * GA_ENHANCE                   0x4274
         * SU_REG_DEST                  0x42C8
         */
        /* workaround for RV530 */
        if (rdev->family == CHIP_RV530) {
                WREG32(0x4128, 0xFF);
        }
        r420_pipes_init(rdev);
        gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        pipe_select_current = (tmp >> 2) & 3;
        tmp = (1 << pipe_select_current) |
              (((gb_pipe_select >> 8) & 0xF) << 4);
        WREG32_PLL(0x000D, tmp);
        if (r520_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

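/*
 * Derive the VRAM bus width from MC_CNTL0: the MEM_NUM_CHANNELS field
 * selects 32/64/128/256 bits, which is doubled when MC_CHANNEL_SIZE is
 * set. R5xx VRAM is always treated as DDR here.
 */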
static void r520_vram_get_type(struct radeon_device *rdev)
{
        uint32_t tmp;

        rdev->mc.vram_width = 128;
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32_MC(R520_MC_CNTL0);
        switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
        case 0:
                rdev->mc.vram_width = 32;
                break;
        case 1:
                rdev->mc.vram_width = 64;
                break;
        case 2:
                rdev->mc.vram_width = 128;
                break;
        case 3:
                rdev->mc.vram_width = 256;
                break;
        default:
                rdev->mc.vram_width = 128;
                break;
        }
        if (tmp & R520_MC_CHANNEL_SIZE)
                rdev->mc.vram_width *= 2;
}

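/*
 * Memory controller init: detect the VRAM width/type, size VRAM with the
 * common r100 helper, place VRAM at offset 0 in the MC address space and,
 * on non-AGP boards, pick a GTT location before refreshing the bandwidth
 * info.
 */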
void r520_mc_init(struct radeon_device *rdev)
{
        r520_vram_get_type(rdev);
        r100_vram_init_sizes(rdev);
        radeon_vram_location(rdev, &rdev->mc, 0);
        rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}

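/*
 * Program the MC address ranges: stop all MC clients, wait for the MC to
 * go idle, write the VRAM size and the FB/HDP locations, then either
 * program the AGP aperture (AGP boards) or write 0xFFFFFFFF to
 * MC_AGP_LOCATION and clear the AGP base, and finally resume the clients.
 */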
void r520_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;

        /* Stops all mc clients */
        rv515_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (r520_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
        /* Write VRAM size in case we are limiting it */
        WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
        /* Program MC, should be a 32bits limited address space */
        WREG32_MC(R_000004_MC_FB_LOCATION,
                  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
                  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
        WREG32(R_000134_HDP_FB_LOCATION,
               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32_MC(R_000005_MC_AGP_LOCATION,
                          S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
                          S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
                WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
                WREG32_MC(R_000007_AGP_BASE_2,
                          S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
        } else {
                WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
                WREG32_MC(R_000006_AGP_BASE, 0);
                WREG32_MC(R_000007_AGP_BASE_2, 0);
        }

        rv515_mc_resume(rdev, &save);
}

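/*
 * Common startup path shared by r520_init() and r520_resume(): program the
 * MC, restart clocks, init the GPU pipes and (on PCIE) the GART, then bring
 * up writeback, the GFX fence ring, IRQs, a 1MB CP ring buffer and the IB
 * pool, finishing with an IB test on the GFX ring. Any failure is returned
 * to the caller.
 */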
static int r520_startup(struct radeon_device *rdev)
{
        int r;

        r520_mc_program(rdev);
        /* Resume clock */
        rv515_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        r520_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }

        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;

        r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        if (r) {
                dev_err(rdev->dev, "failed testing IB (%d).\n", r);
                rdev->accel_working = false;
                return r;
        }
        return 0;
}

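/*
 * Resume: make sure the PCIE GART is disabled, reset the GPU with clocks
 * running so the ATOM post cannot loop forever, re-post the card through
 * atom_asic_init(), reinitialize surface registers and rerun the common
 * startup path. accel_working is cleared again if startup fails.
 */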
int r520_resume(struct radeon_device *rdev)
{
        int r;

        /* Make sure GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        atom_asic_init(rdev->mode_info.atom_context);
        /* Resume clock after posting */
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);

        rdev->accel_working = true;
        r = r520_startup(rdev);
        if (r) {
                rdev->accel_working = false;
        }
        return r;
}

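/*
 * One-time driver init for R520-class chips: fetch and validate the ATOM
 * BIOS, reset the GPU, post the card if the BIOS has not done so, read the
 * clock info, set up AGP when present, then initialize the MC, fence
 * driver, IRQs, memory manager, GART and IB pool before running
 * r520_startup(). If startup fails, acceleration is torn down and disabled,
 * but init itself still returns 0.
 */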
int r520_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* restore some register to sane defaults */
        r100_restore_sanity(rdev);
        /* TODO: disable VGA, need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r)
                        return r;
        } else {
                dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        if (!radeon_card_posted(rdev) && rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r520_mc_init(rdev);
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rv370_pcie_gart_init(rdev);
        if (r)
                return r;
        rv515_set_safe_registers(rdev);

        r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                rdev->accel_working = false;
        }

        r = r520_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv370_pcie_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}