@@ -1952,8 +1952,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	int ret = 0;
@@ -1991,7 +1990,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
 		i915_gem_retire_requests_ring(obj->ring);
@@ -2264,7 +2263,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
 int i915_gpu_idle(struct drm_device *dev)
@@ -2468,7 +2467,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 	}
 
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 