|
@@ -635,6 +635,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static u32
|
|
|
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
|
|
|
+{
|
|
|
+ struct drm_device *dev = ring->dev;
|
|
|
+
|
|
|
+ /* Workaround to force correct ordering between irq and seqno writes on
|
|
|
+ * ivb (and maybe also on snb) by reading from a CS register (like
|
|
|
+ * ACTHD) before reading the status page. */
|
|
|
+ if (IS_GEN7(dev))
|
|
|
+ intel_ring_get_active_head(ring);
|
|
|
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
|
|
|
+}
|
|
|
+
|
|
|
static u32
|
|
|
ring_get_seqno(struct intel_ring_buffer *ring)
|
|
|
{
|
|
@@ -811,6 +824,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
|
|
|
if (!dev->irq_enabled)
|
|
|
return false;
|
|
|
|
|
|
+ /* It looks like we need to prevent the gt from suspending while waiting
|
|
|
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
|
|
|
+ * blt/bsd rings on ivb. */
|
|
|
+ if (IS_GEN7(dev))
|
|
|
+ gen6_gt_force_wake_get(dev_priv);
|
|
|
+
|
|
|
spin_lock(&ring->irq_lock);
|
|
|
if (ring->irq_refcount++ == 0) {
|
|
|
ring->irq_mask &= ~rflag;
|
|
@@ -835,6 +854,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
|
|
|
ironlake_disable_irq(dev_priv, gflag);
|
|
|
}
|
|
|
spin_unlock(&ring->irq_lock);
|
|
|
+
|
|
|
+ if (IS_GEN7(dev))
|
|
|
+ gen6_gt_force_wake_put(dev_priv);
|
|
|
}
|
|
|
|
|
|
static bool
|
|
@@ -1341,7 +1363,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
|
|
|
.write_tail = gen6_bsd_ring_write_tail,
|
|
|
.flush = gen6_ring_flush,
|
|
|
.add_request = gen6_add_request,
|
|
|
- .get_seqno = ring_get_seqno,
|
|
|
+ .get_seqno = gen6_ring_get_seqno,
|
|
|
.irq_get = gen6_bsd_ring_get_irq,
|
|
|
.irq_put = gen6_bsd_ring_put_irq,
|
|
|
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
|
@@ -1476,7 +1498,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
|
|
|
.write_tail = ring_write_tail,
|
|
|
.flush = blt_ring_flush,
|
|
|
.add_request = gen6_add_request,
|
|
|
- .get_seqno = ring_get_seqno,
|
|
|
+ .get_seqno = gen6_ring_get_seqno,
|
|
|
.irq_get = blt_ring_get_irq,
|
|
|
.irq_put = blt_ring_put_irq,
|
|
|
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
|
@@ -1499,6 +1521,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
|
|
|
ring->flush = gen6_render_ring_flush;
|
|
|
ring->irq_get = gen6_render_ring_get_irq;
|
|
|
ring->irq_put = gen6_render_ring_put_irq;
|
|
|
+ ring->get_seqno = gen6_ring_get_seqno;
|
|
|
} else if (IS_GEN5(dev)) {
|
|
|
ring->add_request = pc_render_add_request;
|
|
|
ring->get_seqno = pc_render_get_seqno;
|