@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe,
+			    uint64_t addr, unsigned count,
+			    uint32_t incr, uint32_t flags);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -735,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe,
+			  uint64_t addr, unsigned count,
+			  uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,