@@ -644,6 +644,35 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 	POSTING_READ(IMR);
 }
 
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+}
+
 static bool
 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -654,13 +683,8 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_enable_irq(dev_priv,
-					    ring->irq_enable_mask);
-		else
-			i915_enable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (ring->irq_refcount++ == 0)
+		i915_enable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 
 	return true;
@@ -673,13 +697,8 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_disable_irq(dev_priv,
-					     ring->irq_enable_mask);
-		else
-			i915_disable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (--ring->irq_refcount == 0)
+		i915_disable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 }
 
@@ -1301,8 +1320,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1342,8 +1361,8 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1418,12 +1437,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = ring_add_request;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
-		if (IS_GEN5(dev))
+		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		else
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
 			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
 		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
 	}
 	ring->init = init_ring_common;