@@ -583,6 +583,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128;
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -1107,11 +1108,89 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 	return 0;
 }
 
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool was_interruptible;
+	int ret;
+
+	/* XXX As we have not yet audited all the paths to check that
+	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
+	 * allow us to be interruptible by a signal.
+	 */
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
+	ret = i915_wait_request(ring, seqno, true);
+
+	dev_priv->mm.interruptible = was_interruptible;
+
+	return ret;
+}
+
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+	struct drm_i915_gem_request *request;
+	u32 seqno = 0;
+	int ret;
+
+	i915_gem_retire_requests_ring(ring);
+
+	if (ring->last_retired_head != -1) {
+		ring->head = ring->last_retired_head;
+		ring->last_retired_head = -1;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		int space;
+
+		if (request->tail == -1)
+			continue;
+
+		space = request->tail - (ring->tail + 8);
+		if (space < 0)
+			space += ring->size;
+		if (space >= n) {
+			seqno = request->seqno;
+			break;
+		}
+
+		/* Consume this request in case we need more space than
+		 * is available and so need to prevent a race between
+		 * updating last_retired_head and direct reads of
+		 * I915_RING_HEAD. It also provides a nice sanity check.
+		 */
+		request->tail = -1;
+	}
+
+	if (seqno == 0)
+		return -ENOSPC;
+
+	ret = intel_ring_wait_seqno(ring, seqno);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(ring->last_retired_head == -1))
+		return -ENOSPC;
+
+	ring->head = ring->last_retired_head;
+	ring->last_retired_head = -1;
+	ring->space = ring_space(ring);
+	if (WARN_ON(ring->space < n))
+		return -ENOSPC;
+
+	return 0;
+}
+
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
+	int ret;
 	u32 head;
 
 	/* If the reported head position has wrapped or hasn't advanced,
@@ -1125,6 +1204,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 			return 0;
 	}
 
+	ret = intel_ring_wait_request(ring, n);
+	if (ret != -ENOSPC)
+		return ret;
+
 	trace_i915_ring_wait_begin(ring);
 	if (drm_core_check_feature(dev, DRIVER_GEM))
 		/* With GEM the hangcheck timer should kick us out of the loop,
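
For reference, the free-space test in intel_ring_wait_request() treats the ring as a circular buffer: the space that retiring up to a given request would release is the distance from the current tail to that request's tail, wrapped modulo the ring size, less the 8 bytes the driver holds back (matching ring_space()) so a completely full ring is never mistaken for an empty one. Below is a minimal standalone sketch of that arithmetic; ring_free_space() and the sample values are illustrative only and not part of the patch.

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative sketch (not from the driver): the wrap-around space
 * computation used by intel_ring_wait_request() above. Returns how
 * many bytes become available if the ring head advances to
 * request_tail, given the current ring_tail and total ring size.
 * The +8 mirrors the driver's small reservation so the tail never
 * catches the head exactly.
 */
static int ring_free_space(int request_tail, int ring_tail, int size)
{
	int space = request_tail - (ring_tail + 8);
	if (space < 0)
		space += size;	/* distance wraps past the end of the buffer */
	return space;
}

int main(void)
{
	/* No wrap: the request's tail lies ahead of the current tail. */
	assert(ring_free_space(4096, 1024, 32768) == 3064);

	/* Wrap: the request's tail lies behind the current tail. */
	assert(ring_free_space(512, 32000, 32768) == 1272);

	printf("ok\n");
	return 0;
}

Because the list walk picks the first request whose retirement frees at least n bytes, the caller can sleep on that request's seqno instead of busy-waiting on direct reads of I915_RING_HEAD, which appears to be the point of the patch.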