@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
 			      rdev->gart.table_size >> PAGE_SHIFT);
 	}
 #endif
-	rdev->gart.table.ram.ptr = ptr;
-	memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
+	rdev->gart.ptr = ptr;
+	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
 	return 0;
 }
 
 void radeon_gart_table_ram_free(struct radeon_device *rdev)
 {
-	if (rdev->gart.table.ram.ptr == NULL) {
+	if (rdev->gart.ptr == NULL) {
 		return;
 	}
 #ifdef CONFIG_X86
 	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
 	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
-		set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
+		set_memory_wb((unsigned long)rdev->gart.ptr,
 			      rdev->gart.table_size >> PAGE_SHIFT);
 	}
 #endif
 	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
-			    (void *)rdev->gart.table.ram.ptr,
+			    (void *)rdev->gart.ptr,
 			    rdev->gart.table_addr);
-	rdev->gart.table.ram.ptr = NULL;
+	rdev->gart.ptr = NULL;
 	rdev->gart.table_addr = 0;
 }
 
@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->gart.table.vram.robj);
+				     &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
 	uint64_t gpu_addr;
 	int r;
 
-	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	r = radeon_bo_reserve(rdev->gart.robj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = radeon_bo_pin(rdev->gart.table.vram.robj,
+	r = radeon_bo_pin(rdev->gart.robj,
 			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 	if (r) {
-		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
 		return r;
 	}
-	r = radeon_bo_kmap(rdev->gart.table.vram.robj,
-			   (void **)&rdev->gart.table.vram.ptr);
+	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
 	if (r)
-		radeon_bo_unpin(rdev->gart.table.vram.robj);
-	radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+	radeon_bo_unreserve(rdev->gart.robj);
 	rdev->gart.table_addr = gpu_addr;
 	return r;
 }
 
-void radeon_gart_table_vram_free(struct radeon_device *rdev)
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		return;
 	}
-	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	r = radeon_bo_reserve(rdev->gart.robj, false);
 	if (likely(r == 0)) {
-		radeon_bo_kunmap(rdev->gart.table.vram.robj);
-		radeon_bo_unpin(rdev->gart.table.vram.robj);
-		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_kunmap(rdev->gart.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
+		rdev->gart.ptr = NULL;
 	}
-	radeon_bo_unref(&rdev->gart.table.vram.robj);
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	radeon_gart_table_vram_unpin(rdev);
+	radeon_bo_unref(&rdev->gart.robj);
 }
 
 
@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 		if (rdev->gart.pages[p]) {
 			if (!rdev->gart.ttm_alloced[p])
 				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+						PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base);
+				if (rdev->gart.ptr) {
+					radeon_gart_set_page(rdev, t, page_base);
+				}
 				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
-		page_base = rdev->gart.pages_addr[p];
-		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
-			page_base += RADEON_GPU_PAGE_SIZE;
+		if (rdev->gart.ptr) {
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
 		}
 	}
 	mb();
@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev)
 	int i, j, t;
 	u64 page_base;
 
+	if (!rdev->gart.ptr) {
+		return;
+	}
 	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
 		page_base = rdev->gart.pages_addr[i];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {