
drm/i915: Add intel_ring_handle_seqno_wrap

If stale pre-wrap values are left in the semaphore mailbox registers
after a seqno wrap, syncing against a post-wrap request will complete
immediately. Fix this by emitting ring commands that set the mailbox
registers to zero when the wrap happens.

v2: Use __intel_ring_begin to emit ring commands, from
Chris Wilson.

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add a small comment to handle_seqno_wrap.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 498d2ac15c
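Before the per-file changes, a minimal, self-contained sketch of the failure mode the changelog describes. It assumes the hardware semaphore wait behaves like an unsigned "mailbox value >= wait seqno" comparison (an assumption consistent with the changelog, not a quote of the hardware specification); a stale pre-wrap value left in a mailbox then satisfies the comparison for any small post-wrap seqno, so the wait falls through immediately, while a cleared mailbox does not.

/* seqno_wrap_sketch.c - illustrative only; models the semaphore compare as
 * an unsigned >=, which is an assumption, not the hardware specification. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a mailbox-based semaphore wait condition. */
static bool semaphore_passed(uint32_t mbox_value, uint32_t wait_seqno)
{
	return mbox_value >= wait_seqno;
}

int main(void)
{
	uint32_t stale_mbox = 0xfffffff0;	/* pre-wrap seqno left in the mbox */
	uint32_t wait_seqno = 0x10;		/* request issued after the wrap */

	/* Passes immediately even though the post-wrap request has not
	 * actually been signalled yet. */
	printf("stale mbox:   passed = %d\n", semaphore_passed(stale_mbox, wait_seqno));

	/* After the mailbox has been cleared to zero, which is what this
	 * patch arranges, the wait behaves correctly again. */
	printf("cleared mbox: passed = %d\n", semaphore_passed(0, wait_seqno));

	return 0;
}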

drivers/gpu/drm/i915/i915_gem.c  (+4, -0)

@@ -1952,6 +1952,10 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev);
 	for_each_ring(ring, dev_priv, i) {
+		ret = intel_ring_handle_seqno_wrap(ring);
+		if (ret)
+			return ret;
+
 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
 			ring->sync_seqno[j] = 0;
 	}
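The sync_seqno[] array zeroed in this hunk is the driver's own cache of the last seqno already synced against each other ring, used to skip redundant semaphore waits. A minimal, self-contained sketch (illustrative only; the struct and helper below are hypothetical, not the driver's code) shows why stale pre-wrap entries in that cache are just as harmful as stale mailbox contents:

/* sync_cache_sketch.c - hypothetical model of a "skip redundant waits"
 * check; not taken from the driver. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	uint32_t sync_seqno[3];	/* last seqno already waited on, per other ring */
};

/* A wait is considered redundant if we already waited for an equal or
 * later seqno from that ring. */
static bool wait_is_redundant(const struct fake_ring *to, int from, uint32_t seqno)
{
	return seqno <= to->sync_seqno[from];
}

int main(void)
{
	struct fake_ring render = { .sync_seqno = { 0xfffffff0, 0, 0 } };

	/* The stale pre-wrap entry makes a post-wrap wait look redundant,
	 * so no semaphore wait would be emitted at all. */
	printf("skipped: %d\n", wait_is_redundant(&render, 0, 0x10));

	/* After zeroing the cache, as the hunk above does, the wait is
	 * correctly emitted again. */
	render.sync_seqno[0] = 0;
	printf("skipped: %d\n", wait_is_redundant(&render, 0, 0x10));

	return 0;
}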

drivers/gpu/drm/i915/intel_ringbuffer.c  (+22, -0)

@@ -1402,6 +1402,28 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
 }
 
+int intel_ring_handle_seqno_wrap(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen < 6)
+		return 0;
+
+	ret = __intel_ring_begin(ring, 6 * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
+	/* Leaving a stale, pre-wrap seqno behind in the mboxes will result in
+	 * post-wrap semaphore waits completing immediately. Clear them. */
+	update_mboxes(ring, ring->signal_mbox[0]);
+	update_mboxes(ring, ring->signal_mbox[1]);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
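A note on the six-dword reservation and the BUG_ON above: each update_mboxes() call emits three dwords, and at this point in the driver's history the value it stores in the mailbox register is ring->outstanding_lazy_request, so asserting that field is zero is what guarantees the mailboxes end up cleared. The sketch below is an approximate reconstruction of that helper for context only; the exact command word and dword order are assumptions, not part of this diff:

/* Approximate reconstruction of the gen6-era update_mboxes() helper; the
 * command flags and dword order are assumptions for illustration. The key
 * point is that the stored value is outstanding_lazy_request, which the
 * BUG_ON above guarantees to be zero. */
static void
update_mboxes(struct intel_ring_buffer *ring, u32 mmio_offset)
{
	/* One command dword selecting a semaphore mailbox update... */
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	/* ...the value to store (zero here, since there is no lazy request)... */
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	/* ...and the mailbox register that receives it. */
	intel_ring_emit(ring, mmio_offset);
}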

drivers/gpu/drm/i915/intel_ringbuffer.h  (+1, -1)

@@ -196,7 +196,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 }
 void intel_ring_advance(struct intel_ring_buffer *ring);
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+int __must_check intel_ring_handle_seqno_wrap(struct intel_ring_buffer *ring);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);