@@ -428,6 +428,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
+	unsigned enabled = 0;
 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
@@ -436,73 +437,64 @@ void evergreen_hpd_init(struct radeon_device *rdev)
 		switch (radeon_connector->hpd.hpd) {
 		case RADEON_HPD_1:
 			WREG32(DC_HPD1_CONTROL, tmp);
-			rdev->irq.hpd[0] = true;
 			break;
 		case RADEON_HPD_2:
 			WREG32(DC_HPD2_CONTROL, tmp);
-			rdev->irq.hpd[1] = true;
 			break;
 		case RADEON_HPD_3:
 			WREG32(DC_HPD3_CONTROL, tmp);
-			rdev->irq.hpd[2] = true;
 			break;
 		case RADEON_HPD_4:
 			WREG32(DC_HPD4_CONTROL, tmp);
-			rdev->irq.hpd[3] = true;
 			break;
 		case RADEON_HPD_5:
 			WREG32(DC_HPD5_CONTROL, tmp);
-			rdev->irq.hpd[4] = true;
 			break;
 		case RADEON_HPD_6:
 			WREG32(DC_HPD6_CONTROL, tmp);
-			rdev->irq.hpd[5] = true;
 			break;
 		default:
 			break;
 		}
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+		enabled |= 1 << radeon_connector->hpd.hpd;
 	}
-	if (rdev->irq.installed)
-		evergreen_irq_set(rdev);
+	radeon_irq_kms_enable_hpd(rdev, enabled);
 }
 
 void evergreen_hpd_fini(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
+	unsigned disabled = 0;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 		switch (radeon_connector->hpd.hpd) {
 		case RADEON_HPD_1:
 			WREG32(DC_HPD1_CONTROL, 0);
-			rdev->irq.hpd[0] = false;
 			break;
 		case RADEON_HPD_2:
 			WREG32(DC_HPD2_CONTROL, 0);
-			rdev->irq.hpd[1] = false;
 			break;
 		case RADEON_HPD_3:
 			WREG32(DC_HPD3_CONTROL, 0);
-			rdev->irq.hpd[2] = false;
 			break;
 		case RADEON_HPD_4:
 			WREG32(DC_HPD4_CONTROL, 0);
-			rdev->irq.hpd[3] = false;
 			break;
 		case RADEON_HPD_5:
 			WREG32(DC_HPD5_CONTROL, 0);
-			rdev->irq.hpd[4] = false;
 			break;
 		case RADEON_HPD_6:
 			WREG32(DC_HPD6_CONTROL, 0);
-			rdev->irq.hpd[5] = false;
 			break;
 		default:
 			break;
 		}
+		disabled |= 1 << radeon_connector->hpd.hpd;
 	}
+	radeon_irq_kms_disable_hpd(rdev, disabled);
 }
 
 /* watermark setup */
@@ -1371,7 +1363,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -2348,20 +2340,20 @@ int evergreen_irq_set(struct radeon_device *rdev)
 
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
-		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 		}
-		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
 			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
 		}
-		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
 			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
 		}
 	} else {
-		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 			cp_int_cntl |= RB_INT_ENABLE;
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -2369,32 +2361,32 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	}
 
 	if (rdev->irq.crtc_vblank_int[0] ||
-	    rdev->irq.pflip[0]) {
+	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 		crtc1 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[1] ||
-	    rdev->irq.pflip[1]) {
+	    atomic_read(&rdev->irq.pflip[1])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 		crtc2 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[2] ||
-	    rdev->irq.pflip[2]) {
+	    atomic_read(&rdev->irq.pflip[2])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 		crtc3 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[3] ||
-	    rdev->irq.pflip[3]) {
+	    atomic_read(&rdev->irq.pflip[3])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 		crtc4 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[4] ||
-	    rdev->irq.pflip[4]) {
+	    atomic_read(&rdev->irq.pflip[4])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 		crtc5 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[5] ||
-	    rdev->irq.pflip[5]) {
+	    atomic_read(&rdev->irq.pflip[5])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 		crtc6 |= VBLANK_INT_MASK;
 	}
@@ -2676,7 +2668,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	u32 rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
-	unsigned long flags;
 	bool queue_hotplug = false;
 	bool queue_hdmi = false;
 
@@ -2684,22 +2675,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		return IRQ_NONE;
 
 	wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
 	rptr = rdev->ih.rptr;
 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
-	spin_lock_irqsave(&rdev->ih.lock, flags);
-	if (rptr == wptr) {
-		spin_unlock_irqrestore(&rdev->ih.lock, flags);
-		return IRQ_NONE;
-	}
-restart_ih:
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
 
 	/* display interrupts */
 	evergreen_irq_ack(rdev);
 
-	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
@@ -2716,7 +2706,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[0])
+				if (atomic_read(&rdev->irq.pflip[0]))
 					radeon_crtc_handle_flip(rdev, 0);
 				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");
@@ -2742,7 +2732,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[1])
+				if (atomic_read(&rdev->irq.pflip[1]))
 					radeon_crtc_handle_flip(rdev, 1);
 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");
@@ -2768,7 +2758,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[2])
+				if (atomic_read(&rdev->irq.pflip[2]))
 					radeon_crtc_handle_flip(rdev, 2);
 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D3 vblank\n");
@@ -2794,7 +2784,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[3])
+				if (atomic_read(&rdev->irq.pflip[3]))
 					radeon_crtc_handle_flip(rdev, 3);
 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D4 vblank\n");
@@ -2820,7 +2810,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[4])
+				if (atomic_read(&rdev->irq.pflip[4]))
 					radeon_crtc_handle_flip(rdev, 4);
 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D5 vblank\n");
@@ -2846,7 +2836,7 @@ restart_ih:
 					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 				}
-				if (rdev->irq.pflip[5])
+				if (atomic_read(&rdev->irq.pflip[5]))
 					radeon_crtc_handle_flip(rdev, 5);
 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D6 vblank\n");
@@ -2986,7 +2976,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			rdev->pm.gui_idle = true;
 			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
@@ -2998,17 +2987,19 @@ restart_ih:
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
 	}
-	/* make sure wptr hasn't changed while processing */
-	wptr = evergreen_get_ih_wptr(rdev);
-	if (wptr != rdev->ih.wptr)
-		goto restart_ih;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
-	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
 	return IRQ_HANDLED;
 }
 