rs400.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to the rs400 and rs480 IGPs. */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rs400_gart_adjust_size(struct radeon_device *rdev)
{
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_ERROR("Unable to use IGP GART size %uM\n",
                          (unsigned)(rdev->mc.gtt_size >> 20));
                DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
                DRM_ERROR("Forcing to 32M GART size\n");
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                return;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                /* FIXME: RS400 & RS480 seem to have an issue with GART size
                 * when there is 4G of system memory (needs more testing) */
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                DRM_ERROR("Forcing to 32M GART size (because of ASIC bug?)\n");
        }
}

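/* Flush the GART TLB: request a cache invalidate, poll GART_CACHE_CNTRL
 * until the invalidate bit self-clears (bounded by the usec timeout), then
 * clear the register again. */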
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        unsigned int timeout = rdev->usec_timeout;

        WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
        do {
                tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
                if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
                        break;
                DRM_UDELAY(1);
                timeout--;
        } while (timeout > 0);
        WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

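/* One-time GART setup: validate the requested aperture size, register the
 * debugfs dump, and allocate the page table in system RAM (one 32-bit
 * entry per GPU page, hence num_gpu_pages * 4 bytes). */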
int rs400_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.ram.ptr) {
                WARN(1, "RS400 GART already initialized.\n");
                return 0;
        }
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                return -EINVAL;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        if (rs400_debugfs_pcie_gart_info_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        return radeon_gart_table_ram_alloc(rdev);
}

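/* Enable the IGP GART: map the GTT range through the AGP aperture
 * registers, point GART_BASE at the table allocated by rs400_gart_init(),
 * then turn the GART on and flush the TLB. */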
int rs400_gart_enable(struct radeon_device *rdev)
{
        uint32_t size_reg;
        uint32_t tmp;

        radeon_gart_restore(rdev);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
        case 64:
                size_reg = RS480_VA_SIZE_64MB;
                break;
        case 128:
                size_reg = RS480_VA_SIZE_128MB;
                break;
        case 256:
                size_reg = RS480_VA_SIZE_256MB;
                break;
        case 512:
                size_reg = RS480_VA_SIZE_512MB;
                break;
        case 1024:
                size_reg = RS480_VA_SIZE_1GB;
                break;
        case 2048:
                size_reg = RS480_VA_SIZE_2GB;
                break;
        default:
                return -EINVAL;
        }
        /* It should be fine to program it to max value */
        if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
                WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
        } else {
                WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
                WREG32(RS480_AGP_BASE_2, 0);
        }
        tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
        tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        }
        /* Program the GART table base: address bits [31:12], with address
         * bits [39:32] folded into bits [11:4] of the register. */
        tmp = (u32)rdev->gart.table_addr & 0xfffff000;
        tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
        WREG32_MC(RS480_GART_BASE, tmp);
        /* TODO: more tweaking here */
        WREG32_MC(RS480_GART_FEATURE_ID,
                  (RS480_TLB_ENABLE |
                   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
        /* Disable snooping */
        WREG32_MC(RS480_AGP_MODE_CNTL,
                  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
        /* Disable AGP mode */
        /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
         * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS480_MC_MISC_CNTL,
                          (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
        } else {
                WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
        }
        /* Enable gart */
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
        rs400_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
        radeon_gart_fini(rdev);
}

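/* Write one GART page table entry. Entry layout (as built below): system
 * address bits [31:12], address bits [39:32] placed in entry bits [11:4],
 * and a low nibble of 0xc, which appears to carry the read/write enable
 * flags for the page. */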
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        uint32_t entry;

        if (i < 0 || i >= rdev->gart.num_gpu_pages)
                return -EINVAL;
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
                0xc;
        entry = cpu_to_le32(entry);
        rdev->gart.table.ram.ptr[i] = entry;
        return 0;
}

int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 2)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void rs400_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: HDP same place on rs400 ? */
        r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "rs400: Failed to wait MC idle while "
                       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
        }
}

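/* MC setup: the IGP carves its "VRAM" out of system memory, so the base of
 * the framebuffer is taken from the low 16 bits of RADEON_NB_TOM (the start
 * address in 64K units) before placing the VRAM and GTT apertures. */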
void rs400_mc_init(struct radeon_device *rdev)
{
        u64 base;

        rs400_gart_adjust_size(rdev);
        rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        r100_vram_init_sizes(rdev);
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        radeon_gtt_location(rdev, &rdev->mc);
}

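/* MC registers on these IGPs are reached indirectly: write the register
 * index to RS480_NB_MC_INDEX (with the WR_EN bit set for writes), access
 * RS480_NB_MC_DATA, then reset the index register to 0xff. */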
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(RS480_NB_MC_INDEX, reg & 0xff);
        r = RREG32(RS480_NB_MC_DATA);
        WREG32(RS480_NB_MC_INDEX, 0xff);
        return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
        WREG32(RS480_NB_MC_DATA, (v));
        WREG32(RS480_NB_MC_INDEX, 0xff);
}

#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL);
        seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
        tmp = RREG32(RADEON_BUS_CNTL);
        seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
        if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
                seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
                tmp = RREG32_MC(0x100);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
                tmp = RREG32(0x134);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
                seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32(RS480_AGP_BASE_2);
                seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32(RADEON_MC_AGP_LOCATION);
                seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
        }
        tmp = RREG32_MC(RS480_GART_BASE);
        seq_printf(m, "GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_FEATURE_ID);
        seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
        seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_MC_MISC_CNTL);
        seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x5F);
        seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
        seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3B);
        seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3C);
        seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
        tmp = RREG32_MC(0x30);
        seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
        tmp = RREG32_MC(0x31);
        seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
        tmp = RREG32_MC(0x32);
        seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
        tmp = RREG32_MC(0x33);
        seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
        tmp = RREG32_MC(0x34);
        seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
        tmp = RREG32_MC(0x35);
        seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
        tmp = RREG32_MC(0x36);
        seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
        tmp = RREG32_MC(0x37);
        seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
        {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
        return 0;
#endif
}

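/* Program the MC framebuffer window: stop all MC clients, wait for the MC
 * to go idle, write the VRAM start/top into MC_FB_LOCATION, then resume
 * the clients. */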
void rs400_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;

        /* Stops all mc clients */
        r100_mc_stop(rdev, &save);
        /* Wait for mc idle */
        if (rs400_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
}

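/* Common startup path shared by init and resume: program the MC, restart
 * clocks, configure the pipes, enable bus mastering and the GART, then
 * bring up IRQs, the CP ring (1MB), writeback and the IB pool. */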
static int rs400_startup(struct radeon_device *rdev)
{
        int r;

        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs400_gpu_init(rdev);
        r100_enable_bm(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

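/* Resume: with the GART disabled and the MC reprogrammed, reset the GPU,
 * re-post the card through its combios tables, and rerun the common
 * startup sequence. */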
int rs400_resume(struct radeon_device *rdev)
{
        /* Make sure GART is disabled */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs400_startup(rdev);
}

int rs400_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

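/* Driver init entry point for RS400/RS480: fetch and validate the video
 * BIOS (combios only on these parts), reset and post the card if needed,
 * set up clocks, MC, fence/IRQ/memory managers and the GART, then attempt
 * accel startup, tearing accel back down if that fails. */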
int rs400_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* initialize memory controller */
        rs400_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}