@@ -610,17 +610,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -630,14 +631,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -645,17 +647,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -665,14 +668,15 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -680,17 +684,18 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -700,14 +705,15 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -786,6 +792,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -795,14 +802,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	 * blt/bsd rings on ivb. */
 	gen6_gt_force_wake_get(dev_priv);
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -812,15 +819,16 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
@@ -954,7 +962,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
-	spin_lock_init(&ring->irq_lock);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
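
For reference, a minimal standalone sketch of the locking pattern the hunks above convert to. It is not part of the patch; demo_irq_lock, demo_refcount, demo_get_irq and demo_irq_handler are hypothetical names used only for illustration. The point is that a lock which may also be taken from interrupt context must be taken with spin_lock_irqsave() in process context, while plain spin_lock() is enough inside the interrupt handler itself, where local interrupts are already disabled.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Illustrative names only; not part of the patch above. */
static DEFINE_SPINLOCK(demo_irq_lock);
static unsigned int demo_refcount;

/*
 * Process context: disable local interrupts while holding the lock,
 * because the same lock may also be taken by the interrupt handler
 * below. Using plain spin_lock() here could deadlock if the IRQ
 * fired on this CPU while the lock was held.
 */
static void demo_get_irq(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_irq_lock, flags);
	if (demo_refcount++ == 0)
		/* unmask the interrupt source here */;
	spin_unlock_irqrestore(&demo_irq_lock, flags);
}

/*
 * Interrupt context: local interrupts are already off on this CPU,
 * so plain spin_lock() is sufficient and cheaper than irqsave.
 */
static irqreturn_t demo_irq_handler(int irq, void *arg)
{
	spin_lock(&demo_irq_lock);
	/* ... acknowledge and handle the interrupt ... */
	spin_unlock(&demo_irq_lock);
	return IRQ_HANDLED;
}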