@@ -621,7 +621,7 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -630,71 +630,67 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
                 return false;
 
         spin_lock(&ring->irq_lock);
-        if (ring->irq_refcount++ == 0)
-                ironlake_enable_irq(dev_priv, flag);
+        if (ring->irq_refcount++ == 0) {
+                ring->irq_mask &= ~rflag;
+                I915_WRITE_IMR(ring, ring->irq_mask);
+                ironlake_enable_irq(dev_priv, gflag);
+        }
         spin_unlock(&ring->irq_lock);
 
         return true;
 }
 
 static void
-ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
         spin_lock(&ring->irq_lock);
-        if (--ring->irq_refcount == 0)
-                ironlake_disable_irq(dev_priv, flag);
+        if (--ring->irq_refcount == 0) {
+                ring->irq_mask |= rflag;
+                I915_WRITE_IMR(ring, ring->irq_mask);
+                ironlake_disable_irq(dev_priv, gflag);
+        }
         spin_unlock(&ring->irq_lock);
 }
 
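The rework above turns the old single-flag helpers into the gen6 variants:
each ring now carries a software copy of its per-ring interrupt mask
(ring->irq_mask), so the first waiter clears the ring's user-interrupt bit
in the ring's IMR and enables the GT-level source, and the last waiter
restores both. ring->irq_lock serializes the refcount against the register
writes. A minimal user-space model of that refcounted get/put pattern is
sketched below; write_imr() is an illustrative stand-in for I915_WRITE_IMR,
not the driver's API.

#include <stdint.h>
#include <stdio.h>

struct ring {
        int irq_refcount;       /* number of active waiters */
        uint32_t irq_mask;      /* software copy of the ring's IMR */
};

static void write_imr(struct ring *ring)        /* stand-in for I915_WRITE_IMR */
{
        printf("IMR <- %08x\n", ring->irq_mask);
}

static void get_irq(struct ring *ring, uint32_t rflag)
{
        /* First waiter: unmask the ring's user-interrupt bit. */
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
                write_imr(ring);
        }
}

static void put_irq(struct ring *ring, uint32_t rflag)
{
        /* Last waiter gone: mask the bit again so the ring stays quiet. */
        if (--ring->irq_refcount == 0) {
                ring->irq_mask |= rflag;
                write_imr(ring);
        }
}

int main(void)
{
        struct ring r = { .irq_mask = ~0u };
        get_irq(&r, 1u << 12);  /* 0 -> 1: unmask and write IMR */
        get_irq(&r, 1u << 12);  /* nested waiter: no register write */
        put_irq(&r, 1u << 12);  /* still one waiter: no write */
        put_irq(&r, 1u << 12);  /* 1 -> 0: mask and write IMR */
        return 0;
}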
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
         if (!dev->irq_enabled)
-               return false;
+                return false;
 
         spin_lock(&ring->irq_lock);
         if (ring->irq_refcount++ == 0) {
-                ring->irq_mask &= ~rflag;
-                I915_WRITE_IMR(ring, ring->irq_mask);
-                ironlake_enable_irq(dev_priv, gflag);
+                if (IS_G4X(dev))
+                        i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+                else
+                        ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
         }
         spin_unlock(&ring->irq_lock);
 
         return true;
 }
-
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
         spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0) {
-                ring->irq_mask |= rflag;
-                I915_WRITE_IMR(ring, ring->irq_mask);
-                ironlake_disable_irq(dev_priv, gflag);
+                if (IS_G4X(dev))
+                        i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+                else
+                        ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
         }
         spin_unlock(&ring->irq_lock);
 }
 
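bsd_ring_get_irq/bsd_ring_put_irq can no longer share a common helper
because the BSD user interrupt lives in different register files across
generations: on G4X it sits in the legacy display interrupt path, while
from Ironlake onwards it is routed through the GT interrupt registers,
hence the IS_G4X() dispatch. A simplified sketch of the two enable helpers
(structure-field spellings approximate, not copied from i915_irq.c) shows
that they differ essentially in which mask register they touch:

/* Sketch: the legacy path masks via IMR, the Ironlake GT path via GTIMR. */
static void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask &= ~mask;            /* unmask in legacy IMR */
        I915_WRITE(IMR, dev_priv->irq_mask);
}

static void ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask &= ~mask;         /* unmask in GTIMR */
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}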
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
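With the old thin wrappers dropped, every gen6 caller of the new interface
must supply both the GT-level flag (gflag, for the shared GT enable) and
the ring-local flag (rflag, for the ring's IMR). A hypothetical wrapper in
the style the new signature expects is sketched below; the exact per-ring
flag names are illustrative here, not taken from this patch.

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_GEN6_BSD_USER_INTERRUPT,    /* gflag */
                                 GEN6_BSD_USER_INTERRUPT);      /* rflag */
}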