@@ -699,11 +699,19 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
 	struct radeon_device *rdev =
 		container_of(work, struct radeon_device, uvd.idle_work.work);
 
-	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
-		radeon_set_uvd_clocks(rdev, 0, 0);
-	else
+	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = false;
+			mutex_unlock(&rdev->pm.mutex);
+			radeon_pm_compute_clocks(rdev);
+		} else {
+			radeon_set_uvd_clocks(rdev, 0, 0);
+		}
+	} else {
 		schedule_delayed_work(&rdev->uvd.idle_work,
 				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	}
 }
 
 void radeon_uvd_note_usage(struct radeon_device *rdev)
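With DPM enabled, the idle handler now clears rdev->pm.dpm.uvd_active under pm.mutex and lets radeon_pm_compute_clocks() reselect a power state, instead of forcing the UVD clocks to zero; the fence check and the rearming of the delayed work are unchanged. For reference, a minimal sketch of how this handler is bound to the delayed work item, assuming the usual setup in radeon_uvd_init() (not part of this hunk):

	/* Sketch only, not part of this patch: the idle handler above is
	 * attached to the delayed work item once at UVD init time,
	 * roughly as follows.
	 */
	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

The handler then reschedules itself every UVD_IDLE_TIMEOUT_MS for as long as UVD fences are still outstanding.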
@@ -711,8 +719,14 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
 	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
 	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
 					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
-	if (set_clocks)
-		radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (set_clocks) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			/* XXX pick SD/HD/MVC */
+			radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+		} else {
+			radeon_set_uvd_clocks(rdev, 53300, 40000);
+		}
+	}
 }
 
 static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
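Here set_clocks is true only when the idle work was not already pending (UVD was considered idle) and the rescheduling actually armed the timer, so the power bump happens once per burst of activity. In the DPM case the patch requests the internal UVD power state rather than programming the fixed vclk/dclk pair (53300/40000). A hedged sketch of the expected call site, modeled on the command-submission path in radeon_cs.c (not part of this patch, and the parser variable name is an assumption):

	/* Sketch only, assuming the CS ioctl flow: before submitting to
	 * the UVD ring, note the usage so the power state is raised and
	 * the idle timeout is pushed back.
	 */
	if (parser.ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);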