@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/* caller must hold ring lock */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
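For context, the new "caller must hold ring lock" comments mean that any path draining the rings is expected to take rdev->ring_lock before calling radeon_fence_wait_empty_locked(); on a detected lockup (-EDEADLK) the function itself temporarily drops and re-acquires that lock around radeon_gpu_reset() before retrying. A minimal sketch of such a caller follows; it is illustrative only and not part of this patch, the helper name is made up, and RADEON_NUM_RINGS / fence_drv[i].initialized are assumed to match the existing driver definitions.

/* Illustrative sketch only, not part of this patch: drain every
 * initialized ring while holding the ring lock, as the comment on
 * radeon_fence_wait_empty_locked() requires.
 */
static void example_wait_rings_idle(struct radeon_device *rdev)
{
	int i;

	mutex_lock(&rdev->ring_lock);
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		if (!rdev->fence_drv[i].initialized)
			continue;
		/* may drop and retake rdev->ring_lock internally on -EDEADLK */
		radeon_fence_wait_empty_locked(rdev, i);
	}
	mutex_unlock(&rdev->ring_lock);
}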