@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 {
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *fence = NULL;
+	u64 offset = chan->fence.vma.offset + sema->mem->start;
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 
 		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
 		OUT_RING (chan, NvSema);
-		OUT_RING (chan, sema->mem->start);
+		OUT_RING (chan, offset);
 		OUT_RING (chan, 1);
 	} else
 	if (dev_priv->chipset < 0xc0) {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		OUT_RING (chan, 1);
 		OUT_RING (chan, 1); /* ACQUIRE_EQ */
 	} else {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 5);
 		if (ret)
 			return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 {
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *fence = NULL;
+	u64 offset = chan->fence.vma.offset + sema->mem->start;
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 
 		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
 		OUT_RING (chan, NvSema);
-		OUT_RING (chan, sema->mem->start);
+		OUT_RING (chan, offset);
 		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
 		OUT_RING (chan, 1);
 	} else
 	if (dev_priv->chipset < 0xc0) {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		OUT_RING (chan, 1);
 		OUT_RING (chan, 2); /* RELEASE */
 	} else {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 5);
 		if (ret)
 			return ret;
@@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		nouveau_gpuobj_ref(NULL, &obj);
 		if (ret)
 			return ret;
+	} else {
+		/* map fence bo into channel's vm */
+		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
+					 &chan->fence.vma);
+		if (ret)
+			return ret;
 	}
 
 	INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 void
 nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *tmp, *fence;
 
 	spin_lock(&chan->fence.lock);
-
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
-
 	spin_unlock(&chan->fence.lock);
+
+	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
 }
 
 int