|
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
 	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
 	int ret;
 
+	pi->restricted_levels = 0;
+
 	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 
 	rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
 
 	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
+	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
 	return 0;
 }
 
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
 	else
 		return requested_state->high.mclk;
 }
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		pi->restricted_levels = 3;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		pi->restricted_levels = 2;
+	} else {
+		pi->restricted_levels = 0;
+	}
+
+	rv6xx_clear_vc(rdev);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+	rv6xx_enable_medium(rdev);
+	rv6xx_enable_high(rdev);
+	if (pi->restricted_levels == 3)
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}