@@ -84,6 +84,53 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
 	}
 }
 
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+			      u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   clock, false, &dividers);
+	if (r)
+		return r;
+
+	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(status_reg) & DCLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	int r = 0;
+	u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0xffff0000;
+	cg_scratch |= vclk / 100; /* Mhz */
+
+	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0x0000ffff;
+	cg_scratch |= (dclk / 100) << 16; /* Mhz */
+
+done:
+	WREG32(CG_SCRATCH1, cg_scratch);
+
+	return r;
+}
+
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
 	u16 ctl, v;