@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
 {
 	unsigned long irqflags;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	rdev->irq.afmt[block] = true;
 	radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
 {
 	unsigned long irqflags;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	rdev->irq.afmt[block] = false;
 	radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 	unsigned long irqflags;
 	int i;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 	unsigned long irqflags;
 	int i;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
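
All four hunks add the same early-return guard: skip the register update when the DRM core has not installed the interrupt handler (rdev->ddev->irq_enabled is false), instead of taking rdev->irq.lock and calling radeon_irq_set(). A minimal sketch of the resulting shape of one of these helpers, assuming the structures shown in the hunks; the function name here is illustrative, the real ones are the radeon_irq_kms_{enable,disable}_{afmt,hpd} functions above:

	/* Sketch only, not part of the patch. */
	static void radeon_irq_kms_update_example(struct radeon_device *rdev, int block)
	{
		unsigned long irqflags;

		/* Guard added by the patch: nothing to program while the
		 * DRM irq handler is not registered. */
		if (!rdev->ddev->irq_enabled)
			return;

		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		rdev->irq.afmt[block] = true;	/* or the hpd mask update */
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}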