
drm/nouveau: add instmem flush() hook

This removes the previous prepare_access() and finish_access() hooks, and
replaces them with a much simpler flush() hook.

All use of the hooks in the chipset-specific code before nv50 is removed
completely, as no flushing is required there at all.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 15 years ago
parent
commit
f56cb86f9a
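In practical terms, the commit replaces bracketing of every PRAMIN access with a single flush issued after a batch of writes. A minimal sketch of the calling convention before and after this commit (gpuobj and value are placeholders; the hook names and nv_wo32() are the driver's own):

	/* before: every access had to be bracketed, even on chipsets
	 * where the hooks did nothing */
	instmem->prepare_access(dev, true);
	nv_wo32(dev, gpuobj, 0, value);
	nv_wo32(dev, gpuobj, 1, value);
	instmem->finish_access(dev);

	/* after: write directly, then flush once the batch is done;
	 * nv50 implements this as a PRAMIN flush, earlier chipsets as
	 * a no-op, so pre-nv50 call sites drop the calls entirely */
	nv_wo32(dev, gpuobj, 0, value);
	nv_wo32(dev, gpuobj, 1, value);
	instmem->flush(dev);

Reads (nv_ro32()/nv_ri32()) no longer need any bracketing at all, which is why paths such as nouveau_gpuobj_suspend() and nouveau_sgdma_get_page() lose their hook calls below without gaining a flush.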

+ 3 - 6
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -269,8 +269,7 @@ struct nouveau_instmem_engine {
 	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
 	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
 	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
-	void	(*prepare_access)(struct drm_device *, bool write);
-	void	(*finish_access)(struct drm_device *);
+	void	(*flush)(struct drm_device *);
 };
 
 struct nouveau_mc_engine {
@@ -1027,8 +1026,7 @@ extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
 extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
 extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
 extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv04_instmem_finish_access(struct drm_device *);
+extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
 extern int  nv50_instmem_init(struct drm_device *);
@@ -1040,8 +1038,7 @@ extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
 extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
 extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
 extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv50_instmem_finish_access(struct drm_device *);
+extern void nv50_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
 extern int  nv04_mc_init(struct drm_device *);

+ 2 - 4
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -143,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 		phys |= 0x30;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (size) {
 		unsigned offset_h = upper_32_bits(phys);
 		unsigned offset_l = lower_32_bits(phys);
@@ -175,7 +174,7 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			}
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00050001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
@@ -218,7 +217,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 	virt -= dev_priv->vm_vram_base;
 	pages = (size >> 16) << 1;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pages) {
 		pgt = dev_priv->vm_vram_pt[virt >> 29];
 		pte = (virt & 0x1ffe0000ULL) >> 15;
@@ -232,7 +230,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 		while (pte < end)
 			nv_wo32(dev, pgt, pte++, 0);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00050001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {

+ 12 - 33
drivers/gpu/drm/nouveau/nouveau_object.c

@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		}
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, ctx);
 
 			list_add_tail(&ref->list, &chan->ramht_refs);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return 0;
 		}
 		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		if (co >= dev_priv->ramht_size)
 			co = 0;
 	} while (co != ho);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
 	return -ENOMEM;
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		return;
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
 
 			list_del(&ref->list);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return;
 		}
 
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			co = 0;
 	} while (co != ho);
 	list_del(&ref->list);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
 		 chan->id, ref->handle);
@@ -280,10 +276,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
 		int i;
 
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	*gpuobj_ret = gpuobj;
@@ -371,10 +366,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 	}
 
 	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	if (gpuobj->dtor)
@@ -606,10 +600,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	if (pref) {
@@ -697,8 +690,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	instmem->prepare_access(dev, true);
-
 	if (dev_priv->card_type < NV_50) {
 		uint32_t frame, adjust, pte_flags = 0;
 
@@ -735,7 +726,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		nv_wo32(dev, *gpuobj, 5, flags5);
 	}
 
-	instmem->finish_access(dev);
+	instmem->flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
 	(*gpuobj)->class  = class;
@@ -850,7 +841,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type >= NV_50) {
 		nv_wo32(dev, *gpuobj, 0, class);
 		nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -875,7 +865,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 			}
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
 	(*gpuobj)->class  = class;
@@ -988,17 +978,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		uint32_t vm_offset, pde;
 
-		instmem->prepare_access(dev, true);
-
 		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
 		vm_offset += chan->ramin->gpuobj->im_pramin->start;
 
 		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
 							0, &chan->vm_pd, NULL);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		for (i = 0; i < 0x4000; i += 8) {
 			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
 			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +994,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 					     dev_priv->gart_info.sg_ctxdma,
 					     &chan->vm_gart_pt);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		nv_wo32(dev, chan->vm_pd, pde++,
 			    chan->vm_gart_pt->instance | 0x03);
 		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +1005,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 						     dev_priv->vm_vram_pt[i],
 						     &chan->vm_vram_pt[i]);
-			if (ret) {
-				instmem->finish_access(dev);
+			if (ret)
 				return ret;
-			}
 
 			nv_wo32(dev, chan->vm_pd, pde++,
 				    chan->vm_vram_pt[i]->instance | 0x61);
 			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
 		}
 
-		instmem->finish_access(dev);
+		instmem->flush(dev);
 	}
 
 	/* RAMHT */
@@ -1164,10 +1146,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		dev_priv->engine.instmem.prepare_access(dev, false);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
-		dev_priv->engine.instmem.finish_access(dev);
 	}
 
 	return 0;
@@ -1212,10 +1192,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
 		if (!gpuobj->im_backing_suspend)
 			continue;
 
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	nouveau_gpuobj_suspend_cleanup(dev);

+ 3 - 9
drivers/gpu/drm/nouveau/nouveau_sgdma.c

@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
 	nvbe->pte_start = pte;
 	for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,7 +115,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
 		nv_wr32(dev, 0x100c80, 0x00050001);
@@ -154,7 +153,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nvbe->pte_start;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,7 +168,7 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
 		nv_wr32(dev, 0x100c80, 0x00050001);
@@ -272,7 +270,6 @@ nouveau_sgdma_init(struct drm_device *dev)
 		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
 			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type < NV_50) {
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
 		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +291,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 			nv_wo32(dev, gpuobj, (i+4)/4, 0);
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
 	dev_priv->gart_info.aper_base = 0;
@@ -325,14 +322,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int pte;
 
 	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
 	if (dev_priv->card_type < NV_50) {
-		instmem->prepare_access(dev, false);
 		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
-		instmem->finish_access(dev);
 		return 0;
 	}
 

+ 6 - 12
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -54,8 +54,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv04_instmem_clear;
 		engine->instmem.bind		= nv04_instmem_bind;
 		engine->instmem.unbind		= nv04_instmem_unbind;
-		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
-		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -95,8 +94,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv04_instmem_clear;
 		engine->instmem.bind		= nv04_instmem_bind;
 		engine->instmem.unbind		= nv04_instmem_unbind;
-		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
-		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -138,8 +136,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv04_instmem_clear;
 		engine->instmem.bind		= nv04_instmem_bind;
 		engine->instmem.unbind		= nv04_instmem_unbind;
-		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
-		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -181,8 +178,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv04_instmem_clear;
 		engine->instmem.bind		= nv04_instmem_bind;
 		engine->instmem.unbind		= nv04_instmem_unbind;
-		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
-		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -225,8 +221,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv04_instmem_clear;
 		engine->instmem.bind		= nv04_instmem_bind;
 		engine->instmem.unbind		= nv04_instmem_unbind;
-		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
-		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv40_mc_init;
 		engine->mc.takedown		= nv40_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -271,8 +266,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear		= nv50_instmem_clear;
 		engine->instmem.bind		= nv50_instmem_bind;
 		engine->instmem.unbind		= nv50_instmem_unbind;
-		engine->instmem.prepare_access	= nv50_instmem_prepare_access;
-		engine->instmem.finish_access	= nv50_instmem_finish_access;
+		engine->instmem.flush		= nv50_instmem_flush;
 		engine->mc.init			= nv50_mc_init;
 		engine->mc.takedown		= nv50_mc_takedown;
 		engine->timer.init		= nv04_timer_init;

+ 0 - 8
drivers/gpu/drm/nouveau/nv04_fifo.c

@@ -137,7 +137,6 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
 	RAMFC_WR(DMA_GET, chan->pushbuf_base);
 	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
@@ -145,7 +144,6 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
 			     DMA_FETCH_ENDIANNESS));
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -172,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV04_RAMFC(chid), tmp;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	tmp = nv_ri32(dev, fc + 8);
@@ -184,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -226,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
@@ -236,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
 	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
 	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
 	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

+ 1 - 8
drivers/gpu/drm/nouveau/nv04_instmem.c

@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev)
 	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
 	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
 
 
 	/* Clear all of it, except the BIOS image that's in the first 64KiB */
 	/* Clear all of it, except the BIOS image that's in the first 64KiB */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
 	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
 		nv_wi32(dev, i, 0x00000000);
 		nv_wi32(dev, i, 0x00000000);
-	dev_priv->engine.instmem.finish_access(dev);
 }
 }
 
 
 static void
 static void
@@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 }
 
 void
-nv04_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-}
-
-void
-nv04_instmem_finish_access(struct drm_device *dev)
+nv04_instmem_flush(struct drm_device *dev)
 {
 }
 

+ 0 - 10
drivers/gpu/drm/nouveau/nv10_fifo.c

@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc +  0, chan->pushbuf_base);
 	nv_wi32(dev, fc +  4, chan->pushbuf_base);
 	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 			      NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 			      0);
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV10_RAMFC(chid), tmp;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
 
 out:
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
 		return 0;
 	fc = NV10_RAMFC(chid);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
 	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 
 out:
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
 	return 0;

+ 0 - 5
drivers/gpu/drm/nouveau/nv20_graph.c

@@ -421,7 +421,6 @@ nv20_graph_create_context(struct nouveau_channel *chan)
 		return ret;
 
 	/* Initialise default context values */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	ctx_init(dev, chan->ramin_grctx->gpuobj);
 
 	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
@@ -430,8 +429,6 @@ nv20_graph_create_context(struct nouveau_channel *chan)
 
 	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
 			chan->ramin_grctx->instance >> 4);
-
-	dev_priv->engine.instmem.finish_access(dev);
 	return 0;
 }
 
@@ -444,9 +441,7 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
 	if (chan->ramin_grctx)
 		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
-	dev_priv->engine.instmem.finish_access(dev);
 }
 
 int

+ 0 - 8
drivers/gpu/drm/nouveau/nv40_fifo.c

@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc +  0, chan->pushbuf_base);
 	nv_wi32(dev, fc +  4, chan->pushbuf_base);
 	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 			      0x30000000 /* no idea.. */);
 	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
 	nv_wi32(dev, fc + 60, 0x0001FFFF);
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
 	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
 		return 0;
 	fc = NV40_RAMFC(chid);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
 	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
 	nv_wi32(dev, fc + 72, tmp);
 #endif
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,

+ 0 - 2
drivers/gpu/drm/nouveau/nv40_graph.c

@@ -67,7 +67,6 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 		return ret;
 
 	/* Initialise default context values */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (!pgraph->ctxprog) {
 		struct nouveau_grctx ctx = {};
 
@@ -80,7 +79,6 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 	}
 	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
 		     chan->ramin_grctx->gpuobj->im_pramin->start);
-	dev_priv->engine.instmem.finish_access(dev);
 	return 0;
 }
 

+ 1 - 2
drivers/gpu/drm/nouveau/nv50_display.c

@@ -71,14 +71,13 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
 		return ret;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
 	nv_wo32(dev, obj, 1, limit);
 	nv_wo32(dev, obj, 2, offset);
 	nv_wo32(dev, obj, 3, 0x00000000);
 	nv_wo32(dev, obj, 4, 0x00000000);
 	nv_wo32(dev, obj, 5, 0x00010000);
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	return 0;
 }

+ 3 - 12
drivers/gpu/drm/nouveau/nv50_fifo.c

@@ -49,12 +49,11 @@ nv50_fifo_init_thingo(struct drm_device *dev)
 	priv->cur_thingo = !priv->cur_thingo;
 
 	/* We never schedule channel 0 or 127 */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = 1, nr = 0; i < 127; i++) {
 		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
 			nv_wo32(dev, cur->gpuobj, nr++, i);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x32f4, cur->instance >> 12);
 	nv_wr32(dev, 0x32ec, nr);
@@ -281,8 +280,6 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
 	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
 	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
@@ -304,7 +301,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	ret = nv50_fifo_channel_enable(dev, chan->id, false);
 	if (ret) {
@@ -349,8 +346,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
 	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
 	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
@@ -404,8 +399,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
 	return 0;
 }
@@ -434,8 +427,6 @@ nv50_fifo_unload_context(struct drm_device *dev)
 	ramfc = chan->ramfc->gpuobj;
 	cache = chan->cache->gpuobj;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
 	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
 	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
@@ -491,7 +482,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
 		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	/*XXX: probably reload ch127 (NULL) state back too */
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);

+ 2 - 6
drivers/gpu/drm/nouveau/nv50_graph.c

@@ -226,7 +226,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	obj = chan->ramin_grctx->gpuobj;
 
 	hdr = IS_G80 ? 0x200 : 0x20;
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
 	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
 					   pgraph->grctx_size - 1);
@@ -234,9 +233,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
 	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
 	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
-	dev_priv->engine.instmem.finish_access(dev);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (!pgraph->ctxprog) {
 		struct nouveau_grctx ctx = {};
 		ctx.dev = chan->dev;
@@ -247,8 +244,8 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 		nouveau_grctx_vals_load(dev, obj);
 	}
 	nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
-	dev_priv->engine.instmem.finish_access(dev);
 
+	dev_priv->engine.instmem.flush(dev);
 	return 0;
 }
 
@@ -264,10 +261,9 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 	if (!chan->ramin || !chan->ramin->gpuobj)
 		return;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 }

+ 6 - 27
drivers/gpu/drm/nouveau/nv50_instmem.c

@@ -35,8 +35,6 @@ struct nv50_instmem_priv {
 	struct nouveau_gpuobj_ref *pramin_pt;
 	struct nouveau_gpuobj_ref *pramin_bar;
 	struct nouveau_gpuobj_ref *fb_bar;
-
-	bool last_access_wr;
 };
 
 #define NV50_INSTMEM_PAGE_SHIFT 12
@@ -262,16 +260,13 @@ nv50_instmem_init(struct drm_device *dev)
 
 	/* Assume that praying isn't enough, check that we can re-read the
 	 * entire fake channel back from the PRAMIN BAR */
-	dev_priv->engine.instmem.prepare_access(dev, false);
 	for (i = 0; i < c_size; i += 4) {
 		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
 			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
 									i);
-			dev_priv->engine.instmem.finish_access(dev);
 			return -EINVAL;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
 
@@ -451,13 +446,12 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 		vram |= 0x30;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
 		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
 		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00040001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
@@ -490,36 +484,21 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	pte     = (gpuobj->im_pramin->start >> 12) << 1;
 	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
 		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	gpuobj->im_bound = 0;
 	return 0;
 }
 
 void
-nv50_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
-	priv->last_access_wr = write;
-}
-
-void
-nv50_instmem_finish_access(struct drm_device *dev)
+nv50_instmem_flush(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
-	if (priv->last_access_wr) {
-		nv_wr32(dev, 0x070000, 0x00000001);
-		if (!nv_wait(0x070000, 0x00000001, 0x00000000))
-			NV_ERROR(dev, "PRAMIN flush timeout\n");
-	}
+	nv_wr32(dev, 0x070000, 0x00000001);
+	if (!nv_wait(0x070000, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }