
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, which is the IGP
 * of the X1250/X1270 family supporting Intel CPUs (while the RS690/RS740
 * is the X1250/X1270 supporting AMD CPUs). The display engine is the
 * AVIVO one, the BIOS is an AtomBIOS, and the 3D block is the one of the
 * R4XX family. The GART differs from the RS400 one and is very close to
 * the one of the R600 family (R600 likely being an evolution of the
 * RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "atom.h"
#include "rs600d.h"
#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev)
{
	/* read back the MC value from the hw */
	int r;
	u32 tmp;

	/* Setup GPU memory space */
	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
	rdev->mc.gtt_location = 0xffffffffUL;
	r = radeon_mc_setup(rdev);
	if (r)
		return r;
	return 0;
}
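/*
 * Note: gtt_location is primed to ~0 above; presumably this lets
 * radeon_mc_setup() choose where to place the GTT on its own, with only
 * the VRAM base taken from the FB_LOCATION register read back from the hw.
 */
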
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}
void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
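/*
 * The polarity bit above is programmed to the opposite of the current
 * sense state: with a panel connected the interrupt is armed for the
 * disconnect edge, and vice versa, so each plug/unplug transition
 * raises a fresh hot-plug interrupt.
 */
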
void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
	}
	rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}
/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
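/*
 * The invalidate bits above are pulsed low-high-low rather than written
 * once, and each write is followed by a read-back of MC_PT0_CNTL; the
 * read-backs presumably flush the posted register writes so the L1 TLB
 * and L2 cache invalidation completes before the caller proceeds.
 */
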
int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RS600 GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Enable bus master */
	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
	WREG32(R_00004C_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
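/*
 * Rough order of the enable sequence above: pin the table in VRAM and
 * enable bus mastering; size the L2 cache/queue and route all 19 MC
 * clients through translation; point context 0 at a flat page table
 * spanning [gtt_start, gtt_end] and zero contexts 1-7; map the system
 * aperture onto VRAM; finally flip the enable bits and flush the TLBs
 * before marking the GART ready.
 */
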
void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (r == 0) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void rs600_gart_fini(struct radeon_device *rdev)
{
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void __iomem *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}
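/*
 * Worked example of the PTE encoding above (values are illustrative):
 * a system page at bus address 0x12345000 becomes
 *
 *   0x12345000 | VALID | SYSTEM | SNOOPED | READABLE | WRITEABLE
 *     = 0x0000000012345067
 *
 * i.e. the 4 KB-aligned address with the flag bits in the low byte,
 * stored as one 64-bit entry per GART page via writeq().
 */
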
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (rdev->irq.sw_int)
		tmp |= S_000040_SW_INT_EN(1);
	if (rdev->irq.crtc_vblank_int[0])
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	if (rdev->irq.crtc_vblank_int[1])
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	if (rdev->irq.hpd[0])
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	if (rdev->irq.hpd[1])
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = ~C_000044_SW_INT;
	u32 tmp;

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		*r500_disp_int = 0;
	}

	if (irqs)
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	return irqs & irq_mask;
}
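/*
 * Ack model used above: GEN_INT_STATUS and the per-source status
 * registers are treated as write-one-to-clear, so every asserted bit
 * just read is written straight back to retire it. Note the return
 * value is masked down to the SW interrupt bit (~C_000044_SW_INT);
 * display sources are reported separately through *r500_disp_int.
 */
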
void rs600_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev, &tmp);
}
int rs600_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	uint32_t r500_disp_int;
	bool queue_hotplug = false;

	status = rs600_irq_ack(rdev, &r500_disp_int);
	if (!status && !r500_disp_int)
		return IRQ_NONE;
	while (status || r500_disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status))
			radeon_fence_process(rdev);
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
			drm_handle_vblank(rdev->ddev, 0);
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
			drm_handle_vblank(rdev->ddev, 1);
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev, &r500_disp_int);
	}
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}
void rs600_gpu_init(struct radeon_device *rdev)
{
	r100_hdp_reset(rdev);
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_vram_info(struct radeon_device *rdev)
{
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	/* FIXME: implement, should this be like rs690 ? */
}
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}
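/*
 * MC registers on this chip are reached indirectly: the register
 * address goes into MC_IND_INDEX (with the CITF_ARB0 client select,
 * plus WR_EN for writes) and the payload moves through MC_IND_DATA.
 * The RREG32_MC()/WREG32_MC() helpers used throughout this file end
 * up in these two accessors.
 */
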
void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}
static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	rv515_mc_resume(rdev, &save);
}
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;
	/* Enable IRQ */
	rs600_irq_set(rdev);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}
int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}
void rs600_fini(struct radeon_device *rdev)
{
	rs600_suspend(rdev);
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Get VRAM information */
	rs600_vram_info(rdev);
	/* Initialize memory controller (also test AGP) */
	r = rs600_mc_init(rdev);
	if (r)
		return r;
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		rs600_suspend(rdev);
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}