@@ -40,32 +40,37 @@
 #include "radeon.h"
 #include "radeon_trace.h"
 
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
+	u32 scratch_index;
+
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
 		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = R600_WB_EVENT_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = RADEON_WB_SCRATCH_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
 	} else
-		WREG32(rdev->fence_drv.scratch_reg, seq);
+		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
 }
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 {
-	u32 seq;
+	u32 seq = 0;
+	u32 scratch_index;
 
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
 		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = R600_WB_EVENT_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = RADEON_WB_SCRATCH_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
 	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
+		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
 	return seq;
 }
 
@@ -73,28 +78,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
 	if (fence->emitted) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
 	if (!rdev->cp.ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
-		radeon_fence_write(rdev, fence->seq);
+		radeon_fence_write(rdev, fence->seq, fence->ring);
 	else
 		radeon_fence_ring_emit(rdev, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
 	fence->emitted = true;
-	list_move_tail(&fence->list, &rdev->fence_drv.emitted);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 {
 	struct radeon_fence *fence;
 	struct list_head *i, *n;
@@ -102,34 +107,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	bool wake = false;
 	unsigned long cjiffies;
 
-	seq = radeon_fence_read(rdev);
-	if (seq != rdev->fence_drv.last_seq) {
-		rdev->fence_drv.last_seq = seq;
-		rdev->fence_drv.last_jiffies = jiffies;
-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	seq = radeon_fence_read(rdev, ring);
+	if (seq != rdev->fence_drv[ring].last_seq) {
+		rdev->fence_drv[ring].last_seq = seq;
+		rdev->fence_drv[ring].last_jiffies = jiffies;
+		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
 	} else {
 		cjiffies = jiffies;
-		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-			cjiffies -= rdev->fence_drv.last_jiffies;
-			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+			cjiffies -= rdev->fence_drv[ring].last_jiffies;
+			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
 				/* update the timeout */
-				rdev->fence_drv.last_timeout -= cjiffies;
+				rdev->fence_drv[ring].last_timeout -= cjiffies;
 			} else {
 				/* the 500ms timeout is elapsed we should test
 				 * for GPU lockup
 				 */
-				rdev->fence_drv.last_timeout = 1;
+				rdev->fence_drv[ring].last_timeout = 1;
 			}
 		} else {
 			/* wrap around update last jiffies, we will just wait
 			 * a little longer
 			 */
-			rdev->fence_drv.last_jiffies = cjiffies;
+			rdev->fence_drv[ring].last_jiffies = cjiffies;
 		}
 		return false;
 	}
 	n = NULL;
-	list_for_each(i, &rdev->fence_drv.emitted) {
+	list_for_each(i, &rdev->fence_drv[ring].emitted) {
 		fence = list_entry(i, struct radeon_fence, list);
 		if (fence->seq == seq) {
 			n = i;
@@ -141,11 +146,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 		i = n;
 		do {
 			n = i->prev;
-			list_move_tail(i, &rdev->fence_drv.signaled);
+			list_move_tail(i, &rdev->fence_drv[ring].signaled);
 			fence = list_entry(i, struct radeon_fence, list);
 			fence->signaled = true;
 			i = n;
-		} while (i != &rdev->fence_drv.emitted);
+		} while (i != &rdev->fence_drv[ring].emitted);
 		wake = true;
 	}
 	return wake;
@@ -157,14 +162,16 @@ static void radeon_fence_destroy(struct kref *kref)
 	struct radeon_fence *fence;
 
 	fence = container_of(kref, struct radeon_fence, kref);
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	list_del(&fence->list);
 	fence->emitted = false;
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
 	kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+			struct radeon_fence **fence,
+			int ring)
 {
 	unsigned long irq_flags;
 
@@ -177,15 +184,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
 	(*fence)->emitted = false;
 	(*fence)->signaled = false;
 	(*fence)->seq = 0;
+	(*fence)->ring = ring;
 	INIT_LIST_HEAD(&(*fence)->list);
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-
 bool radeon_fence_signaled(struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
@@ -197,7 +204,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (fence->rdev->gpu_lockup)
 		return true;
 
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	signaled = fence->signaled;
 	/* if we are shuting down report all fence as signaled */
 	if (fence->rdev->shutdown) {
@@ -208,10 +215,10 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 		signaled = true;
 	}
 	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev);
+		radeon_fence_poll_locked(fence->rdev, fence->ring);
 		signaled = fence->signaled;
 	}
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
 	return signaled;
 }
 
@@ -230,14 +237,14 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (radeon_fence_signaled(fence)) {
 		return 0;
 	}
-	timeout = rdev->fence_drv.last_timeout;
+	timeout = rdev->fence_drv[fence->ring].last_timeout;
 retry:
 	/* save current sequence used to check for GPU lockup */
-	seq = rdev->fence_drv.last_seq;
+	seq = rdev->fence_drv[fence->ring].last_seq;
 	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 		if (unlikely(r < 0)) {
@@ -245,7 +252,7 @@ retry:
 		}
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_timeout(rdev->fence_drv.queue,
+		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
 			 radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
@@ -258,10 +265,11 @@ retry:
 			timeout = r;
 			goto retry;
 		}
-		/* don't protect read access to rdev->fence_drv.last_seq
+		/* don't protect read access to rdev->fence_drv[t].last_seq
 		 * if we experiencing a lockup the value doesn't change
 		 */
-		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+		if (seq == rdev->fence_drv[fence->ring].last_seq &&
+		    radeon_gpu_is_lockup(rdev)) {
 			/* good news we believe it's a lockup */
 			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
 			       fence->seq, seq);
@@ -272,20 +280,20 @@ retry:
 			r = radeon_gpu_reset(rdev);
 			if (r)
 				return r;
-			radeon_fence_write(rdev, fence->seq);
+			radeon_fence_write(rdev, fence->seq, fence->ring);
 			rdev->gpu_lockup = false;
 		}
 		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		rdev->fence_drv.last_jiffies = jiffies;
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		goto retry;
 	}
 	return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -294,21 +302,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emitted)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emitted.next,
+	fence = list_entry(rdev->fence_drv[ring].emitted.next,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -317,15 +325,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emitted)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emitted.prev,
+	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
@@ -347,39 +355,49 @@ void radeon_fence_unref(struct radeon_fence **fence)
 	}
 }
 
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	bool wake;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	wake = radeon_fence_poll_locked(rdev, ring);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	if (wake) {
-		wake_up_all(&rdev->fence_drv.queue);
+		wake_up_all(&rdev->fence_drv[ring].queue);
 	}
 }
 
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings)
 {
 	unsigned long irq_flags;
-	int r;
+	int r, ring;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-	if (r) {
-		dev_err(rdev->dev, "fence failed to get scratch register\n");
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return r;
+	for (ring = 0; ring < num_rings; ring++) {
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+			return r;
+		}
+		radeon_fence_write(rdev, 0, ring);
+		atomic_set(&rdev->fence_drv[ring].seq, 0);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+		init_waitqueue_head(&rdev->fence_drv[ring].queue);
+		rdev->fence_drv[ring].initialized = true;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	}
+	for (ring = num_rings; ring < RADEON_NUM_RINGS; ring++) {
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+		rdev->fence_drv[ring].initialized = false;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	}
-	radeon_fence_write(rdev, 0);
-	atomic_set(&rdev->fence_drv.seq, 0);
-	INIT_LIST_HEAD(&rdev->fence_drv.created);
-	INIT_LIST_HEAD(&rdev->fence_drv.emitted);
-	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-	init_waitqueue_head(&rdev->fence_drv.queue);
-	rdev->fence_drv.initialized = true;
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 	if (radeon_debugfs_fence_init(rdev)) {
 		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 	}
@@ -389,14 +407,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
 	unsigned long irq_flags;
-
-	if (!rdev->fence_drv.initialized)
-		return;
-	wake_up_all(&rdev->fence_drv.queue);
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	rdev->fence_drv.initialized = false;
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		wake_up_all(&rdev->fence_drv[ring].queue);
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+		rdev->fence_drv[ring].initialized = false;
+	}
 }
 
 
@@ -410,14 +431,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_fence *fence;
-
-	seq_printf(m, "Last signaled fence 0x%08X\n",
-		   radeon_fence_read(rdev));
-	if (!list_empty(&rdev->fence_drv.emitted)) {
-		fence = list_entry(rdev->fence_drv.emitted.prev,
-				   struct radeon_fence, list);
-		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
-			   fence, fence->seq);
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%08X\n",
+			   radeon_fence_read(rdev, i));
+		if (!list_empty(&rdev->fence_drv[i].emitted)) {
+			fence = list_entry(rdev->fence_drv[i].emitted.prev,
+					   struct radeon_fence, list);
+			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+				   fence, fence->seq);
+		}
 	}
 	return 0;
 }
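
For reference, a minimal caller-side sketch of the per-ring API this patch introduces. It is not part of the patch itself: the function name example_fence_round_trip is hypothetical and the error handling is abbreviated; "ring" is assumed to be any index below the num_rings value passed to radeon_fence_driver_init().

/* Illustrative only -- not part of the patch. Create a fence bound to
 * one ring, emit it on that ring, then block until the GPU signals it.
 */
static int example_fence_round_trip(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	int r;

	/* the fence remembers its ring: (*fence)->ring = ring */
	r = radeon_fence_create(rdev, &fence, ring);
	if (r)
		return r;
	/* sequence number is taken from fence_drv[fence->ring].seq */
	r = radeon_fence_emit(rdev, fence);
	if (!r)
		/* sleeps on the per-ring fence_drv[fence->ring].queue */
		r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

Because each ring now has its own sequence counter, scratch register, and wait queue (serialized by the single rdev->fence_lock), a wait on one ring's fence no longer depends on fence traffic from the other rings.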