/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers the functions specific to the rs400 and rs480 IGPs. */

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
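
/**
 * rs400_gart_adjust_size - clamp the GART size to a value the IGP supports
 * @rdev: radeon_device pointer
 *
 * The RS400/RS480 GART only supports apertures of 32M, 64M, 128M, 256M,
 * 512M, 1G or 2G; any other requested size is forced back to 32M.
 */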
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_ERROR("Unable to use IGP GART size %uM\n",
                          (unsigned)(rdev->mc.gtt_size >> 20));
                DRM_ERROR("Valid GART sizes for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
                DRM_ERROR("Forcing to 32M GART size\n");
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                return;
        }
}
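
/**
 * rs400_gart_tlb_flush - flush the GART TLB
 * @rdev: radeon_device pointer
 *
 * Requests a GART cache invalidate through the MC and polls, for up to
 * rdev->usec_timeout iterations, until the invalidate bit clears again.
 */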
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        unsigned int timeout = rdev->usec_timeout;

        WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
        do {
                tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
                if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
                        break;
                DRM_UDELAY(1);
                timeout--;
        } while (timeout > 0);
        WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}
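
/**
 * rs400_gart_init - allocate the common GART structure and page table
 * @rdev: radeon_device pointer
 *
 * Validates the GART size, initializes the common GART structure and
 * allocates the page table in system RAM (4 bytes per GPU page).
 * Returns 0 on success, a negative error code on failure.
 */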
int rs400_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.ptr) {
                WARN(1, "RS400 GART already initialized\n");
                return 0;
        }
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                return -EINVAL;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        if (rs400_debugfs_pcie_gart_info_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        return radeon_gart_table_ram_alloc(rdev);
}
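
/**
 * rs400_gart_enable - program and enable the IGP GART
 * @rdev: radeon_device pointer
 *
 * Programs the aperture size, AGP base/location, page table base and
 * GART feature bits in the MC, then turns the GART on and flushes the
 * TLB. Returns 0 on success, -EINVAL for an unsupported GART size.
 */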
int rs400_gart_enable(struct radeon_device *rdev)
{
        uint32_t size_reg;
        uint32_t tmp;

        radeon_gart_restore(rdev);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
        case 64:
                size_reg = RS480_VA_SIZE_64MB;
                break;
        case 128:
                size_reg = RS480_VA_SIZE_128MB;
                break;
        case 256:
                size_reg = RS480_VA_SIZE_256MB;
                break;
        case 512:
                size_reg = RS480_VA_SIZE_512MB;
                break;
        case 1024:
                size_reg = RS480_VA_SIZE_1GB;
                break;
        case 2048:
                size_reg = RS480_VA_SIZE_2GB;
                break;
        default:
                return -EINVAL;
        }
        /* It should be fine to program it to max value */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
                WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
        } else {
                WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
                WREG32(RS480_AGP_BASE_2, 0);
        }
        tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
        tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        }
        /* Table should be in 32bits address space so ignore bits above. */
        tmp = (u32)rdev->gart.table_addr & 0xfffff000;
        tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
        WREG32_MC(RS480_GART_BASE, tmp);
        /* TODO: more tweaking here */
        WREG32_MC(RS480_GART_FEATURE_ID,
                  (RS480_TLB_ENABLE |
                   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
        /* Disable snooping */
        WREG32_MC(RS480_AGP_MODE_CNTL,
                  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
        /* Disable AGP mode */
        /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
         * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS480_MC_MISC_CNTL);
                tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
                WREG32_MC(RS480_MC_MISC_CNTL, tmp);
        } else {
                tmp = RREG32_MC(RS480_MC_MISC_CNTL);
                tmp |= RS480_GART_INDEX_REG_EN;
                WREG32_MC(RS480_MC_MISC_CNTL, tmp);
        }
        /* Enable gart */
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
        rs400_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
}
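
/**
 * rs400_gart_disable - turn GART translation off
 * @rdev: radeon_device pointer
 *
 * Sets the disable-out-of-GART-access bit and clears the GART address
 * space size register.
 */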
void rs400_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
}

#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)
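
/**
 * rs400_gart_set_page - write one entry of the GART page table
 * @rdev: radeon_device pointer
 * @i: index of the page table entry to update
 * @addr: system bus address of the page to map
 *
 * Packs the bus address (low 32 bits plus up to 8 upper bits) together
 * with the read/write enable bits into a little-endian 32-bit PTE and
 * stores it in the table held in system RAM.
 */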
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        uint32_t entry;
        u32 *gtt = rdev->gart.ptr;

        if (i < 0 || i >= rdev->gart.num_gpu_pages)
                return -EINVAL;

        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
                RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
        entry = cpu_to_le32(entry);
        gtt[i] = entry;
        return 0;
}
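
/**
 * rs400_mc_wait_for_idle - wait for the memory controller to go idle
 * @rdev: radeon_device pointer
 *
 * Polls RADEON_MC_STATUS for up to rdev->usec_timeout iterations, with a
 * 1us delay per iteration. Returns 0 once the MC reports idle, -1 on
 * timeout.
 */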
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & RADEON_MC_IDLE)
                        return 0;
                DRM_UDELAY(1);
        }
        return -1;
}

static void rs400_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "rs400: Failed to wait MC idle while "
                       "programming pipes. Bad things might happen. %08x\n",
                       RREG32(RADEON_MC_STATUS));
        }
}
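
/**
 * rs400_mc_init - set up the memory controller parameters
 * @rdev: radeon_device pointer
 *
 * Adjusts the GART size, records sideport and DDR information, derives
 * the VRAM base from RADEON_NB_TOM and places the VRAM and GTT apertures
 * in the MC address space.
 */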
static void rs400_mc_init(struct radeon_device *rdev)
{
        u64 base;

        rs400_gart_adjust_size(rdev);
        rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        r100_vram_init_sizes(rdev);
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}
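
/*
 * Indirect MC register access: the north bridge MC registers are reached
 * through an index/data pair (RS480_NB_MC_INDEX / RS480_NB_MC_DATA),
 * serialized by the mc_idx_lock spinlock.
 */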
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        unsigned long flags;
        uint32_t r;

        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
        WREG32(RS480_NB_MC_INDEX, reg & 0xff);
        r = RREG32(RS480_NB_MC_DATA);
        WREG32(RS480_NB_MC_INDEX, 0xff);
        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
        return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        unsigned long flags;

        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
        WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
        WREG32(RS480_NB_MC_DATA, (v));
        WREG32(RS480_NB_MC_INDEX, 0xff);
        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL);
        seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
        tmp = RREG32(RADEON_BUS_CNTL);
        seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
                seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
                tmp = RREG32(RS690_HDP_FB_LOCATION);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
                seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32(RS480_AGP_BASE_2);
                seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32(RADEON_MC_AGP_LOCATION);
                seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
        }
        tmp = RREG32_MC(RS480_GART_BASE);
        seq_printf(m, "GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_FEATURE_ID);
        seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
        seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_MC_MISC_CNTL);
        seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x5F);
        seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
        seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3B);
        seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3C);
        seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
        tmp = RREG32_MC(0x30);
        seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
        tmp = RREG32_MC(0x31);
        seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
        tmp = RREG32_MC(0x32);
        seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
        tmp = RREG32_MC(0x33);
        seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
        tmp = RREG32_MC(0x34);
        seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
        tmp = RREG32_MC(0x35);
        seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
        tmp = RREG32_MC(0x36);
        seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
        tmp = RREG32_MC(0x37);
        seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
        {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
        return 0;
#endif
}
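
/**
 * rs400_mc_program - program the framebuffer location into the MC
 * @rdev: radeon_device pointer
 *
 * Stops all MC clients, waits for the MC to go idle, writes the VRAM
 * start/top into MC_FB_LOCATION and then resumes the clients.
 */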
static void rs400_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;

        /* Stops all mc clients */
        r100_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (rs400_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

        r100_mc_resume(rdev, &save);
}
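
/**
 * rs400_startup - bring the hardware into a working state
 * @rdev: radeon_device pointer
 *
 * Programs the MC, clocks, pipes and GART, then sets up writeback,
 * fences, interrupts, the 1MB CP ring buffer and the IB pool.
 * Returns 0 on success, a negative error code on failure.
 */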
static int rs400_startup(struct radeon_device *rdev)
{
        int r;

        r100_set_common_regs(rdev);

        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs400_gpu_init(rdev);
        r100_enable_bm(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
                if (r)
                        return r;
        }

        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }

        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                return r;
        }

        return 0;
}
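
/**
 * rs400_resume - resume operation after suspend
 * @rdev: radeon_device pointer
 *
 * Disables the GART, reprograms the MC, resets the GPU, re-posts it via
 * the combios tables and then runs rs400_startup() again.
 */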
int rs400_resume(struct radeon_device *rdev)
{
        int r;

        /* Make sure GART is not working */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);

        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r)
                rdev->accel_working = false;
        return r;
}

int rs400_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}
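
/**
 * rs400_init - one-time driver initialization for RS400/RS480
 * @rdev: radeon_device pointer
 *
 * Fetches and parses the combios, posts the card if necessary,
 * initializes clocks, the memory controller, the memory manager and the
 * GART, then calls rs400_startup(). Acceleration is disabled again if
 * startup fails.
 */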
int rs400_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* restore some register to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize memory controller */
        rs400_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        r300_set_reg_safe(rdev);

        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}