@@ -931,3 +931,95 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
 	udelay(50);
 }
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
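Writing 1 here invalidates only VM context 0; per the comment above, the register takes one request bit per context. A minimal sketch of a per-context flush, assuming that bit layout (the helper below is hypothetical, not part of this patch):

	/* Hypothetical helper: invalidate the TLB for one VM context by
	 * setting only its bit (bits 0-7 map to contexts 0-7). */
	static void cayman_vm_tlb_flush_one(struct radeon_device *rdev, unsigned vm_id)
	{
		WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);	/* flush hdp cache first */
		WREG32(VM_INVALIDATE_REQUEST, 1 << vm_id);
	}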
+int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.table.vram.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	/* disable context1-7 */
+	WREG32(VM_CONTEXT1_CNTL2, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	cayman_pcie_gart_tlb_flush(rdev);
+	rdev->gart.ready = true;
+	return 0;
+}
+
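Note that every address programmed into the VM_CONTEXT0_* registers above is shifted right by 12: the start/end/base and fault-default registers take 4 KiB page numbers, not byte addresses. A small illustration of the conversion (the helper name is mine, not part of the patch):

	/* Illustrative only: the VM_CONTEXT0_* registers take 4 KiB page
	 * numbers, so a byte address is shifted by 12 before the write. */
	static inline u32 cayman_byte_to_page(u64 byte_addr)
	{
		return (u32)(byte_addr >> 12);	/* e.g. 0x40000000 -> 0x40000 */
	}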
+void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	if (rdev->gart.table.vram.robj) {
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
+	}
+}
+
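The tail of cayman_pcie_gart_disable follows the standard radeon_bo teardown order: reserve the object, drop the CPU mapping, unpin it, then release the reservation. The same pattern, sketched for an arbitrary pinned and kmapped buffer object (robj is a placeholder name, not from the patch):

	/* Sketch of the reserve -> kunmap -> unpin -> unreserve pattern
	 * used above, for any pinned, kmapped radeon_bo. */
	static void radeon_bo_teardown_sketch(struct radeon_bo *robj)
	{
		if (robj && likely(radeon_bo_reserve(robj, false) == 0)) {
			radeon_bo_kunmap(robj);		/* drop the CPU mapping */
			radeon_bo_unpin(robj);		/* make it evictable again */
			radeon_bo_unreserve(robj);	/* release the reservation */
		}
	}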
+void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
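cayman_pcie_gart_fini unwinds in reverse of the usual init order: hardware off first, then the VRAM-backed table, then the generic GART bookkeeping. For comparison, a hedged sketch of the matching init path, modeled on the r600/evergreen pattern with the shared radeon_gart helpers (this function is not part of the patch):

	/* Sketch only: the init path this fini mirrors, assuming the
	 * r600/evergreen-style use of the common GART helpers. */
	static int cayman_pcie_gart_init_sketch(struct radeon_device *rdev)
	{
		int r;

		r = radeon_gart_init(rdev);	/* allocate page-table bookkeeping */
		if (r)
			return r;
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;	/* 8-byte PTEs */
		return radeon_gart_table_vram_alloc(rdev);	/* VRAM-backed table */
	}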