rs400.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to rs400, rs480 */

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

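/*
 * Only a fixed set of IGP GART aperture sizes (32M..2G, powers of two) is
 * usable; anything else is forced back to 32M, as is every RS400/RS480,
 * because of the suspected ASIC issue noted below.
 */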
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_ERROR("Unable to use IGP GART size %uM\n",
                          (unsigned)(rdev->mc.gtt_size >> 20));
                DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
                DRM_ERROR("Forcing to 32M GART size\n");
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                return;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                /* FIXME: RS400 & RS480 seem to have an issue with GART size
                 * when 4G of system memory is present (needs more testing) */
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                DRM_ERROR("Forcing to 32M GART size (because of ASIC bug?)\n");
        }
}

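/*
 * TLB flush: request a GART cache invalidate, poll until the hardware
 * clears the invalidate bit (or rdev->usec_timeout expires), then clear
 * the cache control register.
 */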
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        unsigned int timeout = rdev->usec_timeout;

        WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
        do {
                tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
                if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
                        break;
                DRM_UDELAY(1);
                timeout--;
        } while (timeout > 0);
        WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

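/*
 * The GART page table for these IGPs lives in system RAM
 * (radeon_gart_table_ram_alloc() below), with one 32-bit entry per GPU
 * page, hence table_size = num_gpu_pages * 4.
 */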
int rs400_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.ram.ptr) {
                WARN(1, "RS400 GART already initialized.\n");
                return 0;
        }
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                return -EINVAL;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        if (rs400_debugfs_pcie_gart_info_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        return radeon_gart_table_ram_alloc(rdev);
}

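/*
 * Bring the GART online: block out-of-GART accesses, size the aperture,
 * point the AGP/GART window at the GTT range chosen by the MC code, program
 * the page-table base address and TLB/feature bits, then flush the TLB and
 * mark the GART ready.
 */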
int rs400_gart_enable(struct radeon_device *rdev)
{
        uint32_t size_reg;
        uint32_t tmp;

        radeon_gart_restore(rdev);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
        case 64:
                size_reg = RS480_VA_SIZE_64MB;
                break;
        case 128:
                size_reg = RS480_VA_SIZE_128MB;
                break;
        case 256:
                size_reg = RS480_VA_SIZE_256MB;
                break;
        case 512:
                size_reg = RS480_VA_SIZE_512MB;
                break;
        case 1024:
                size_reg = RS480_VA_SIZE_1GB;
                break;
        case 2048:
                size_reg = RS480_VA_SIZE_2GB;
                break;
        default:
                return -EINVAL;
        }
        /* It should be fine to program it to max value */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
                WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
        } else {
                WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
                WREG32(RS480_AGP_BASE_2, 0);
        }
        tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
        tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        }
        /* Table should be in 32-bit address space, so ignore bits above. */
        tmp = (u32)rdev->gart.table_addr & 0xfffff000;
        tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
        WREG32_MC(RS480_GART_BASE, tmp);
        /* TODO: more tweaking here */
        WREG32_MC(RS480_GART_FEATURE_ID,
                  (RS480_TLB_ENABLE |
                   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
        /* Disable snooping */
        WREG32_MC(RS480_AGP_MODE_CNTL,
                  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
        /* Disable AGP mode */
        /* FIXME: according to the doc we should set HIDE_MMCFG_BAR=0,
         * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS480_MC_MISC_CNTL,
                          (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
        } else {
                WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
        }
        /* Enable gart */
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
        rs400_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
}

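/*
 * Each GART entry is a little-endian 32-bit word: the page-aligned low
 * address bits, address bits [39:32] packed into bits [11:4], plus the
 * fixed 0xc flag bits this driver sets for every mapped page.
 */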
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        uint32_t entry;

        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
                0xc;
        entry = cpu_to_le32(entry);
        rdev->gart.table.ram.ptr[i] = entry;
        return 0;
}

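/*
 * Poll MC_STATUS (register 0x0150) for up to rdev->usec_timeout
 * microseconds; bit 2 is the condition this driver treats as "MC idle".
 * Returns 0 on idle, -1 on timeout.
 */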
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 2)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void rs400_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: is this correct? */
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "rs400: Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen. %08x\n",
                       RREG32(0x150));
        }
}

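/*
 * MC setup: the VRAM base address is derived from the low bits of the
 * NB_TOM register; radeon_vram_location() and radeon_gtt_location() then
 * place the VRAM and GTT apertures around it.
 */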
void rs400_mc_init(struct radeon_device *rdev)
{
        u64 base;

        rs400_gart_adjust_size(rdev);
        rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        r100_vram_init_sizes(rdev);
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}

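/*
 * Northbridge MC registers are accessed indirectly: write the register
 * index (plus the write-enable bit for stores) to RS480_NB_MC_INDEX, move
 * the data through RS480_NB_MC_DATA, then park the index back at 0xff.
 */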
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(RS480_NB_MC_INDEX, reg & 0xff);
        r = RREG32(RS480_NB_MC_DATA);
        WREG32(RS480_NB_MC_INDEX, 0xff);
        return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
        WREG32(RS480_NB_MC_DATA, (v));
        WREG32(RS480_NB_MC_INDEX, 0xff);
}

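/*
 * debugfs helper: dumps the GART/AGP-related registers (bases, location,
 * feature/mode bits and the GART error registers) into a single
 * "rs400_gart_info" file when CONFIG_DEBUG_FS is enabled.
 */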
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL);
        seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
        tmp = RREG32(RADEON_BUS_CNTL);
        seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
                seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
                tmp = RREG32_MC(0x100);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
                tmp = RREG32(0x134);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
                seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32(RS480_AGP_BASE_2);
                seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32(RADEON_MC_AGP_LOCATION);
                seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
        }
        tmp = RREG32_MC(RS480_GART_BASE);
        seq_printf(m, "GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_FEATURE_ID);
        seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
        seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_MC_MISC_CNTL);
        seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x5F);
        seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
        seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3B);
        seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3C);
        seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
        tmp = RREG32_MC(0x30);
        seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
        tmp = RREG32_MC(0x31);
        seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
        tmp = RREG32_MC(0x32);
        seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
        tmp = RREG32_MC(0x33);
        seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
        tmp = RREG32_MC(0x34);
        seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
        tmp = RREG32_MC(0x35);
        seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
        tmp = RREG32_MC(0x36);
        seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
        tmp = RREG32_MC(0x37);
        seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
        {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
        return 0;
#endif
}

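/*
 * Program the MC's view of VRAM (start/top of the framebuffer aperture)
 * with all MC clients stopped, then resume them; warns if the MC never
 * reports idle before the update.
 */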
void rs400_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;

        /* Stop all MC clients */
        r100_mc_stop(rdev, &save);
        /* Wait for MC idle */
        if (rs400_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "rs400: timed out waiting for MC idle before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
}

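/*
 * Common startup path shared by init and resume: program the MC, start the
 * clocks, configure the pipes, enable bus mastering and the GART, set up
 * IRQs, then bring up the CP ring, writeback and IB pools.
 */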
static int rs400_startup(struct radeon_device *rdev)
{
        int r;

        r100_set_common_regs(rdev);
        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs400_gpu_init(rdev);
        r100_enable_bm(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

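/*
 * Resume: disable the GART, restart the clocks, reprogram the MC, reset the
 * GPU before re-posting it via the combios tables, then run the common
 * startup path.
 */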
int rs400_resume(struct radeon_device *rdev)
{
        /* Make sure the GART is disabled */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs400_startup(rdev);
}

int rs400_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

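/*
 * One-time driver init: fetch and validate the (combios-only) video BIOS,
 * reset and post the card if needed, read the clocks, set up the memory
 * controller, fences, IRQs, the memory manager and the GART, and finally
 * try to bring up acceleration; if that fails, acceleration is torn down
 * but the driver still loads without it.
 */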
int rs400_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disabling VGA needs to use VGA requests */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize memory controller */
        rs400_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}