@@ -8,44 +8,25 @@
 #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-	struct ttm_backend backend;
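+	/* must stay first: TTM's ttm_tt pointer is cast back to nouveau_sgdma_be */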
+	struct ttm_tt ttm;
 	struct drm_device *dev;
-
-	dma_addr_t *pages;
-	unsigned nr_pages;
-	bool unmap_pages;
-
 	u64 offset;
-	bool bound;
 };
 
 static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page,
-		       dma_addr_t *dma_addrs)
+nouveau_sgdma_dma_map(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	int i;
 
-	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-	nvbe->pages = dma_addrs;
-	nvbe->nr_pages = num_pages;
-	nvbe->unmap_pages = true;
-
-	/* this code path isn't called and is incorrect anyways */
-	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-		nvbe->unmap_pages = false;
-		return 0;
-	}
-
-	for (i = 0; i < num_pages; i++) {
-		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-			nvbe->nr_pages = --i;
-			be->func->clear(be);
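+	/* map each backing page for bidirectional PCI DMA */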
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
 			return -EFAULT;
 		}
 	}
@@ -54,53 +35,54 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 }
 
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
-	if (nvbe->bound)
-		be->func->unbind(be);
-
-	if (nvbe->unmap_pages) {
-		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
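+	/* unmap everything we mapped; zero marks an unmapped slot */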
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
 			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
+		ttm->dma_address[i] = 0;
 	}
 }
 
 static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-	if (be) {
+	if (ttm) {
 		NV_DEBUG(nvbe->dev, "\n");
-
-		if (nvbe) {
-			if (nvbe->pages)
-				be->func->clear(be);
-			kfree(nvbe);
-		}
+		kfree(nvbe);
 	}
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
+	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = nvbe->pages[i];
+	for (i = 0; i < ttm->num_pages; i++) {
+		dma_addr_t dma_offset = ttm->dma_address[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
 
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +91,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 		}
 	}
 
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +105,21 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
 	NV_DEBUG(dev, "\n");
 
-	if (!nvbe->bound)
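+	/* the bound/unbound state now lives in the core ttm_tt */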
+	if (ttm->state != tt_bound)
 		return 0;
 
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
+	for (i = 0; i < ttm->num_pages; i++) {
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
 			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv04_sgdma_bind,
 	.unbind			= nv04_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
@@ -158,16 +138,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
+	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +160,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +178,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv41_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv41_sgdma_bind,
 	.unbind			= nv41_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 
-	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
 	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
 	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
 		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +253,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
-	u32 cnt = nvbe->nr_pages;
-	int i;
+	u32 cnt = ttm->num_pages;
+	int i, r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +292,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	if (cnt)
 		nv44_sgdma_fill(pgt, list, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = true;
+	nv44_sgdma_flush(ttm);
 	return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +325,54 @@ nv44_sgdma_unbind(struct ttm_backend *be)
 	if (cnt)
 		nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = false;
+	nv44_sgdma_flush(ttm);
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv44_sgdma_bind,
 	.unbind			= nv44_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct nouveau_mem *node = mem->mm_node;
+	int r;
+
 	/* noop: bound in move_notify() */
-	node->pages = nvbe->pages;
-	nvbe->pages = (dma_addr_t *)node;
-	nvbe->bound = true;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
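+	/* the real bind in move_notify() consumes these addresses */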
+	node->pages = ttm->dma_address;
 	return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
 	/* noop: unbound in move_notify() */
-	nvbe->pages = node->pages;
-	node->pages = NULL;
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv50_sgdma_bind,
 	.unbind			= nv50_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+			 unsigned long size, uint32_t page_flags,
+			 struct page *dummy_read_page)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_sgdma_be *nvbe;
 
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +380,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 		return NULL;
 
 	nvbe->dev = dev;
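+	/* hook up the per-chipset backend ops before TTM uses this ttm_tt */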
+	nvbe->ttm.func = dev_priv->gart_info.func;
 
-	nvbe->backend.func = dev_priv->gart_info.func;
-	return &nvbe->backend;
+	if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+	return &nvbe->ttm;
 }
 
 int