@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
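The struct change above is the whole of the new per-backend state: instead
of caching a precomputed PTE index (pte_start), the backend records the raw
byte offset into the GART and lets each generation's code derive what it
needs. A standalone model of the pre-NV50 index math, assuming
NV_CTXDMA_PAGE_SHIFT is 12 (4 KiB GART pages, consistent with the
"aper_size >> 12" loop later in this patch); the "+ 2" skips the two-word
ctxdma header:

	#include <assert.h>
	#include <stdint.h>

	#define NV_CTXDMA_PAGE_SHIFT 12	/* assumed, per this file */

	/* Byte offset into the GART -> dword index into the ctxdma
	 * object, as computed at the top of nouveau_sgdma_bind(). */
	static unsigned pte_index(uint64_t offset)
	{
		return (unsigned)(offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	}

	int main(void)
	{
		/* First GART page lands at dword 2 (byte 8), right
		 * after the header -- matching "obj_size += 8" below. */
		assert(pte_index(0) == 2);
		/* One 4-byte PTE per 4 KiB of GART space. */
		assert(pte_index(8 << NV_CTXDMA_PAGE_SHIFT) == 10);
		return 0;
	}
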
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
 
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
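
With the card-type dispatch gone, nouveau_sgdma_bind() only ever writes the
pre-NV50 ctxdma format: one 32-bit entry per NV_CTXDMA_PAGE_SIZE chunk of
each CPU page, low address bits OR'd with the same "| 3" flag bits the old
< NV_50 branch used. The offset_h handling could go because the two-word
PTE layout existed only for NV50, and the hand-rolled instmem/TLB flushes
leave with it (presumably the VM layer takes care of that on NV50 now).
A user-space model of the reworked fill loop; the pte[] array stands in
for the ctxdma gpuobj, pte_base for the "+ 2" header skip, and 4096-byte
pages are assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define MODEL_PAGE_SIZE		4096	/* assumed CPU page size */
	#define NV_CTXDMA_PAGE_SIZE	4096

	static void bind_pages(uint32_t *pte, unsigned pte_base,
			       const uint64_t *pages, unsigned nr_pages)
	{
		unsigned i, j, p = pte_base;

		for (i = 0; i < nr_pages; i++) {
			uint64_t dma_offset = pages[i];

			for (j = 0; j < MODEL_PAGE_SIZE / NV_CTXDMA_PAGE_SIZE;
			     j++, p++) {
				/* low 32 bits of the bus address, OR'd with
				 * the same "| 3" flag bits the driver writes */
				pte[p] = (uint32_t)dma_offset | 3;
				dma_offset += NV_CTXDMA_PAGE_SIZE;
			}
		}
	}

	int main(void)
	{
		uint32_t pte[8] = { 0 };
		uint64_t pages[2] = { 0x10000, 0x20000 };

		bind_pages(pte, 2, pages, 2);
		/* prints pte[2]=0x10003 pte[3]=0x20003 */
		printf("pte[2]=0x%x pte[3]=0x%x\n",
		       (unsigned)pte[2], (unsigned)pte[3]);
		return 0;
	}
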
@@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate = nouveau_sgdma_populate,
 	.clear = nouveau_sgdma_clear,
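
Both additions are deliberately thin: TTM hands bind() a placement in whole
pages (mem->start), while nouveau_vm_map_sg()/nouveau_vm_unmap_at() take a
byte offset into the GART VMA plus a byte length, so nearly all the work
here is unit conversion and the bound flag. A sketch of that arithmetic,
with a page shift of 12 assumed for the example:

	#include <assert.h>
	#include <stdint.h>

	#define MODEL_PAGE_SHIFT 12	/* assumed CPU page shift */

	int main(void)
	{
		uint64_t mem_start = 0x100;	/* TTM placement, in pages */
		unsigned nr_pages = 16;

		/* nvbe->offset and the length handed to the VM calls */
		uint64_t offset = mem_start << MODEL_PAGE_SHIFT;
		uint64_t length = (uint64_t)nr_pages << MODEL_PAGE_SHIFT;

		assert(offset == 0x100000);
		assert(length == 0x10000);
		return 0;
	}
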
@@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy = nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate = nouveau_sgdma_populate,
+	.clear = nouveau_sgdma_clear,
+	.bind = nv50_sgdma_bind,
+	.unbind = nv50_sgdma_unbind,
+	.destroy = nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func = &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
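nouveau_sgdma_init_ttm() no longer refuses to run without a ctxdma; the
sg_ctxdma check goes away because NV50+ boards never create one, and the
function's only remaining decision is which vtable to hand TTM. A
stub-typed sketch of that dispatch; the NV_04/NV_50 values and hook bodies
are illustrative placeholders, not the driver's definitions:

	#include <stdio.h>

	enum card_type { NV_04 = 0x04, NV_50 = 0x50 };	/* placeholders */

	struct backend_func {
		int (*bind)(void);
		int (*unbind)(void);
	};

	static int nv04_bind(void)   { return puts("ctxdma PTE writes"); }
	static int nv04_unbind(void) { return puts("ctxdma PTE clears"); }
	static int nv50_bind(void)   { return puts("nouveau_vm_map_sg"); }
	static int nv50_unbind(void) { return puts("nouveau_vm_unmap_at"); }

	static const struct backend_func nv04_sgdma = { nv04_bind, nv04_unbind };
	static const struct backend_func nv50_sgdma = { nv50_bind, nv50_unbind };

	/* mirrors: card_type < NV_50 ? &nouveau_sgdma_backend
	 *                            : &nv50_sgdma_backend */
	static const struct backend_func *pick_backend(enum card_type type)
	{
		return type < NV_50 ? &nv04_sgdma : &nv50_sgdma;
	}

	int main(void)
	{
		pick_backend(NV_04)->bind();	/* ctxdma PTE writes */
		pick_backend(NV_50)->bind();	/* nouveau_vm_map_sg */
		return 0;
	}
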
@@ -226,21 +221,15 @@ nouveau_sgdma_init(struct drm_device *dev)
 
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					 NVOBJ_FLAG_ZERO_ALLOC |
+					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	if (dev_priv->card_type < NV_50) {
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
@@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev)
 		nv_wo32(gpuobj, 4, aper_size - 1);
 		for (i = 2; i < 2 + (aper_size >> 12); i++)
 			nv_wo32(gpuobj, i * 4, 0x00000000);
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
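Two sizing models remain, one per branch. Pre-NV50 keeps the ctxdma rule
visible in the context above: one 4-byte PTE per 4 KiB of aperture plus the
8-byte ctxdma header. NV50+ sizes nothing itself and instead takes a fixed
512 MiB slice of the channel VM via nouveau_vm_get(), recording the VMA's
offset as the aperture base. A quick check of the pre-NV50 arithmetic (the
64 MiB and 512 MiB inputs are assumptions about the aperture sizes the
function picks from earlier, outside this hunk):

	#include <stdint.h>
	#include <stdio.h>

	#define NV_CTXDMA_PAGE_SHIFT 12	/* 4 KiB GART pages */

	static uint32_t ctxdma_size(uint32_t aper_size)
	{
		uint32_t obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;

		return obj_size + 8;	/* ctxdma header */
	}

	int main(void)
	{
		/* 64 MiB aperture: 16384 PTEs -> 65544 bytes */
		printf("%u\n", (unsigned)ctxdma_size(64 << 20));
		/* 512 MiB aperture: 131072 PTEs -> 524296 bytes */
		printf("%u\n", (unsigned)ctxdma_size(512 << 20));
		return 0;
	}
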
@@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
 int