@@ -460,9 +460,11 @@ bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
 static void radeon_pm_idle_work_handler(struct work_struct *work)
 {
 	struct radeon_device *rdev;
+	int resched;
 	rdev = container_of(work, struct radeon_device,
 			    pm.idle_work.work);
 
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.state == PM_STATE_ACTIVE) {
@@ -509,6 +511,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	mutex_unlock(&rdev->ddev->struct_mutex);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 
 	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
 			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));