@@ -1009,6 +1009,27 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
 /*
  * CP.
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
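For context, the added function queues two PM4 packets: SURFACE_SYNC invalidates the texture (TC) and shader (SH) read caches so stale data fetched over the GART is dropped, and EVENT_WRITE_EOP has the CP store fence->seq at the 40-bit address `addr` once all preceding work has retired (DATA_SEL(1) selects a 32-bit fence write, INT_SEL(2) raises an interrupt after the write lands). Below is a minimal CPU-side sketch of what consuming that write can look like; `fence_cpu_addr` and `fence_signaled` are hypothetical names for illustration and are not the radeon driver's actual wait path:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper, not radeon code: returns true once the CP's
 * EVENT_WRITE_EOP has stored a sequence number >= expected_seq at the
 * CPU mapping of the fence location.  The subtraction is evaluated
 * modulo 2^32, so the comparison stays correct across sequence
 * counter wraparound (memory barriers omitted for brevity).
 */
static bool fence_signaled(const volatile uint32_t *fence_cpu_addr,
			   uint32_t expected_seq)
{
	return (int32_t)(*fence_cpu_addr - expected_seq) >= 0;
}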