- /*
- * Copyright 2007 Dave Airlied
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
- /*
- * Authors: Dave Airlied <airlied@linux.ie>
- * Ben Skeggs <darktama@iinet.net.au>
- * Jeremy Kolb <jkolb@brandeis.edu>
- */
- #include "drmP.h"
- #include "nouveau_drm.h"
- #include "nouveau_drv.h"
- #include "nouveau_dma.h"
- #include "nouveau_mm.h"
- #include "nouveau_vm.h"
- #include <linux/log2.h>
- #include <linux/slab.h>
- static void
- nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem))
- DRM_ERROR("bo %p still attached to GEM object\n", bo);
- nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
- kfree(nvbo);
- }
- static void
- nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- if (dev_priv->card_type < NV_50) {
- if (nvbo->tile_mode) {
- if (dev_priv->chipset >= 0x40) {
- *align = 65536;
- *size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x30) {
- *align = 32768;
- *size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x20) {
- *align = 16384;
- *size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x10) {
- *align = 16384;
- *size = roundup(*size, 32 * nvbo->tile_mode);
- }
- }
- } else {
- if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
- *page_shift = dev_priv->chan_vm->lpg_shift;
- else
- *page_shift = dev_priv->chan_vm->spg_shift;
- } else {
- *page_shift = 12;
- }
- *size = roundup(*size, (1 << *page_shift));
- *align = max((1 << *page_shift), *align);
- }
- *size = roundup(*size, PAGE_SIZE);
- }
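- /* Worked example of the rounding above (the numbers are assumptions, not
-  * taken from the original file): on a pre-NV50 chip with chipset >= 0x40 and
-  * tile_mode = 4, a request of size = 100000 becomes *align = 65536 and
-  * *size = roundup(100000, 64 * 4) = 100096, which the final statement then
-  * rounds up to 102400 (25 pages, assuming 4 KiB pages). On NV50+ with a
-  * channel VM, a request larger than 256 KiB instead uses the VM's large
-  * page shift for both the size and the alignment rounding.
-  */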
- int
- nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
- {
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_bo *nvbo;
- int ret = 0, page_shift = 0;
- nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
- if (!nvbo)
- return -ENOMEM;
- INIT_LIST_HEAD(&nvbo->head);
- INIT_LIST_HEAD(&nvbo->entry);
- nvbo->mappable = mappable;
- nvbo->no_vm = no_vm;
- nvbo->tile_mode = tile_mode;
- nvbo->tile_flags = tile_flags;
- nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
- align >>= PAGE_SHIFT;
- if (!nvbo->no_vm && dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
- NV_MEM_ACCESS_RW, &nvbo->vma);
- if (ret) {
- kfree(nvbo);
- return ret;
- }
- }
- nouveau_bo_placement_set(nvbo, flags, 0);
- nvbo->channel = chan;
- ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement, align, 0,
- false, NULL, size, nouveau_bo_del_ttm);
- if (ret) {
- /* ttm will call nouveau_bo_del_ttm if it fails.. */
- return ret;
- }
- nvbo->channel = NULL;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
- *pnvbo = nvbo;
- return 0;
- }
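- /* Illustrative allocation sketch, not part of the original file; the size,
-  * flags and NULL channel below are assumptions. A kernel-internal buffer
-  * might be created, pinned and mapped roughly like this:
-  *
-  *    struct nouveau_bo *nvbo = NULL;
-  *    int ret;
-  *
-  *    ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
-  *                         0, 0x0000, false, true, &nvbo);
-  *    if (ret == 0)
-  *        ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
-  *    if (ret == 0)
-  *        ret = nouveau_bo_map(nvbo);
-  */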
- static void
- set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
- {
- *n = 0;
- if (type & TTM_PL_FLAG_VRAM)
- pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
- if (type & TTM_PL_FLAG_TT)
- pl[(*n)++] = TTM_PL_FLAG_TT | flags;
- if (type & TTM_PL_FLAG_SYSTEM)
- pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
- }
- static void
- set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- if (dev_priv->card_type == NV_10 &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
- /*
- * Make sure that the color and depth buffers are handled
- * by independent memory controller units. Up to a 9x
- * speed up when alpha-blending and depth-test are enabled
- * at the same time.
- */
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
- nvbo->placement.fpfn = vram_pages / 2;
- nvbo->placement.lpfn = ~0;
- } else {
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = vram_pages / 2;
- }
- }
- }
- void
- nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
- {
- struct ttm_placement *pl = &nvbo->placement;
- uint32_t flags = TTM_PL_MASK_CACHING |
- (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
- pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement,
- type, flags);
- pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- type | busy, flags);
- set_placement_range(nvbo, type);
- }
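- /* Example of the lists built above (illustrative; the chosen flags are an
-  * assumption): nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM,
-  * TTM_PL_FLAG_TT) produces
-  *   placements[]      = { TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING }
-  *   busy_placements[] = { TTM_PL_FLAG_VRAM | ..., TTM_PL_FLAG_TT | ... }
-  * so GART is only tried once VRAM is contended, and TTM_PL_FLAG_NO_EVICT
-  * is OR'd into the caching flags while pin_refcnt is non-zero.
-  */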
- int
- nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
- if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
- NV_ERROR(nouveau_bdev(bo->bdev)->dev,
- "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
- return -EINVAL;
- }
- if (nvbo->pin_refcnt++)
- return 0;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
- goto out;
- nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
- if (ret == 0) {
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- dev_priv->fb_aper_free -= bo->mem.size;
- break;
- case TTM_PL_TT:
- dev_priv->gart_info.aper_free -= bo->mem.size;
- break;
- default:
- break;
- }
- }
- ttm_bo_unreserve(bo);
- out:
- if (unlikely(ret))
- nvbo->pin_refcnt--;
- return ret;
- }
- int
- nouveau_bo_unpin(struct nouveau_bo *nvbo)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
- if (--nvbo->pin_refcnt)
- return 0;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
- return ret;
- nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
- if (ret == 0) {
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- dev_priv->fb_aper_free += bo->mem.size;
- break;
- case TTM_PL_TT:
- dev_priv->gart_info.aper_free += bo->mem.size;
- break;
- default:
- break;
- }
- }
- ttm_bo_unreserve(bo);
- return ret;
- }
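- /* Pinning is reference counted: only the first nouveau_bo_pin() actually
-  * moves the buffer and adjusts the aperture accounting, and only the
-  * matching final nouveau_bo_unpin() undoes it. A hedged pairing sketch
-  * (the memtype is an assumption):
-  *
-  *    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
-  *    if (ret == 0) {
-  *        ... program the hardware with nvbo->bo.offset ...
-  *        nouveau_bo_unpin(nvbo);
-  *    }
-  */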
- int
- nouveau_bo_map(struct nouveau_bo *nvbo)
- {
- int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
- if (ret)
- return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
- ttm_bo_unreserve(&nvbo->bo);
- return ret;
- }
- void
- nouveau_bo_unmap(struct nouveau_bo *nvbo)
- {
- if (nvbo)
- ttm_bo_kunmap(&nvbo->kmap);
- }
- int
- nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
- {
- int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
- if (ret)
- return ret;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
- return 0;
- }
- u16
- nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
- {
- bool is_iomem;
- u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
- if (is_iomem)
- return ioread16_native((void __force __iomem *)mem);
- else
- return *mem;
- }
- void
- nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
- {
- bool is_iomem;
- u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
- if (is_iomem)
- iowrite16_native(val, (void __force __iomem *)mem);
- else
- *mem = val;
- }
- u32
- nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
- {
- bool is_iomem;
- u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
- if (is_iomem)
- return ioread32_native((void __force __iomem *)mem);
- else
- return *mem;
- }
- void
- nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
- {
- bool is_iomem;
- u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
- if (is_iomem)
- iowrite32_native(val, (void __force __iomem *)mem);
- else
- *mem = val;
- }
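- /* The rd/wr helpers above rely on a prior nouveau_bo_map() having filled in
-  * nvbo->kmap, and transparently switch to ioread32_native()/iowrite32_native()
-  * when the mapping is I/O memory. A minimal sketch (index and value are
-  * assumptions):
-  *
-  *    ret = nouveau_bo_map(nvbo);
-  *    if (ret == 0) {
-  *        nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
-  *        val = nouveau_bo_rd32(nvbo, 0);
-  *        nouveau_bo_unmap(nvbo);
-  *    }
-  */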
- static struct ttm_backend *
- nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
- switch (dev_priv->gart_info.type) {
- #if __OS_HAS_AGP
- case NOUVEAU_GART_AGP:
- return ttm_agp_backend_init(bdev, dev->agp->bridge);
- #endif
- case NOUVEAU_GART_SGDMA:
- return nouveau_sgdma_init_ttm(dev);
- default:
- NV_ERROR(dev, "Unknown GART type %d\n",
- dev_priv->gart_info.type);
- break;
- }
- return NULL;
- }
- static int
- nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
- {
- /* We'll do this from user space. */
- return 0;
- }
- static int
- nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- if (dev_priv->card_type >= NV_50) {
- man->func = &nouveau_vram_manager;
- man->io_reserve_fastpath = false;
- man->use_io_reserve_lru = true;
- } else {
- man->func = &ttm_bo_manager_func;
- }
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
- switch (dev_priv->gart_info.type) {
- case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- case NOUVEAU_GART_SGDMA:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_CMA;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- man->gpu_offset = dev_priv->gart_info.aper_base;
- break;
- default:
- NV_ERROR(dev, "Unknown GART type: %d\n",
- dev_priv->gart_info.type);
- return -EINVAL;
- }
- break;
- default:
- NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
- }
- static void
- nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
- {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
- TTM_PL_FLAG_SYSTEM);
- break;
- default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
- break;
- }
- *pl = nvbo->placement;
- }
- /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
- static int
- nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
- {
- struct nouveau_fence *fence = NULL;
- int ret;
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
- nouveau_fence_unref(&fence);
- return ret;
- }
- static inline uint32_t
- nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
- {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (nvbo->no_vm) {
- if (mem->mem_type == TTM_PL_TT)
- return NvDmaGART;
- return NvDmaVRAM;
- }
- if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
- }
- static int
- nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 src_offset = old_mem->start << PAGE_SHIFT;
- u64 dst_offset = new_mem->start << PAGE_SHIFT;
- u32 page_count = new_mem->num_pages;
- int ret;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
- page_count = new_mem->num_pages;
- while (page_count) {
- int line_count = (page_count > 2047) ? 2047 : page_count;
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
- OUT_RING (chan, upper_32_bits(dst_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, PAGE_SIZE); /* src_pitch */
- OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING (chan, PAGE_SIZE); /* line_length */
- OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
- OUT_RING (chan, 0x00100110);
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
- return 0;
- }
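- /* The loop above submits the copy in chunks of at most 2047 page-sized
-  * lines. As a worked example (sizes assumed): a 16 MiB buffer of 4096 pages
-  * goes out as chunks of 2047, 2047 and 2 lines, with src_offset and
-  * dst_offset advanced by PAGE_SIZE * line_count after each chunk.
-  */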
- static int
- nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 length = (new_mem->num_pages << PAGE_SHIFT);
- u64 src_offset, dst_offset;
- int ret;
- src_offset = old_mem->start << PAGE_SHIFT;
- dst_offset = new_mem->start << PAGE_SHIFT;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
- while (length) {
- u32 amount, stride, height;
- amount = min(length, (u64)(4 * 1024 * 1024));
- stride = 16 * 4;
- height = amount / stride;
- if (new_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- } else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
- OUT_RING (chan, 1);
- }
- if (old_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- } else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
- OUT_RING (chan, 1);
- }
- ret = RING_SPACE(chan, 14);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, upper_32_bits(dst_offset));
- BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- OUT_RING (chan, stride);
- OUT_RING (chan, stride);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 0x00000101);
- OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING (chan, 0);
- length -= amount;
- src_offset += amount;
- dst_offset += amount;
- }
- return 0;
- }
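- /* The nv50 path above copies at most 4 MiB per iteration, described as
-  * height lines of stride (16 * 4 = 64 byte) width, so a full chunk is
-  * height = 4 MiB / 64 = 65536 lines; when either end is a tiled VRAM
-  * surface the 0x0200/0x021c tiling setup is re-emitted before that chunk.
-  * (The worked numbers are illustrative only.)
-  */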
- static int
- nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
- {
- u32 src_offset = old_mem->start << PAGE_SHIFT;
- u32 dst_offset = new_mem->start << PAGE_SHIFT;
- u32 page_count = new_mem->num_pages;
- int ret;
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
- page_count = new_mem->num_pages;
- while (page_count) {
- int line_count = (page_count > 2047) ? 2047 : page_count;
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF,
- NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
- OUT_RING (chan, src_offset);
- OUT_RING (chan, dst_offset);
- OUT_RING (chan, PAGE_SIZE); /* src_pitch */
- OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING (chan, PAGE_SIZE); /* line_length */
- OUT_RING (chan, line_count);
- OUT_RING (chan, 0x00000101);
- OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING (chan, 0);
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
- return 0;
- }
- static int
- nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_channel *chan;
- int ret;
- chan = nvbo->channel;
- if (!chan || nvbo->no_vm) {
- chan = dev_priv->channel;
- mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
- }
- if (dev_priv->card_type < NV_50)
- ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- if (dev_priv->card_type < NV_C0)
- ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- if (ret == 0) {
- ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
- no_wait_gpu, new_mem);
- }
- if (chan == dev_priv->channel)
- mutex_unlock(&chan->mutex);
- return ret;
- }
- static int
- nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
- {
- u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- struct ttm_placement placement;
- struct ttm_mem_reg tmp_mem;
- int ret;
- placement.fpfn = placement.lpfn = 0;
- placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = placement.busy_placement = &placement_memtype;
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
- if (ret)
- return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_mem);
- if (ret)
- goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
- if (ret)
- goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- out:
- ttm_bo_mem_put(bo, &tmp_mem);
- return ret;
- }
- static int
- nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
- {
- u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- struct ttm_placement placement;
- struct ttm_mem_reg tmp_mem;
- int ret;
- placement.fpfn = placement.lpfn = 0;
- placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = placement.busy_placement = &placement_memtype;
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
- if (ret)
- return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
- if (ret)
- goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- if (ret)
- goto out;
- out:
- ttm_bo_mem_put(bo, &tmp_mem);
- return ret;
- }
- static int
- nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
- struct nouveau_tile_reg **new_tile)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- uint64_t offset;
- if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
- /* Nothing to do. */
- *new_tile = NULL;
- return 0;
- }
- offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->chan_vm) {
- nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
- } else if (dev_priv->card_type >= NV_10) {
- *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
- nvbo->tile_mode,
- nvbo->tile_flags);
- }
- return 0;
- }
- static void
- nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
- struct nouveau_tile_reg *new_tile,
- struct nouveau_tile_reg **old_tile)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
- if (dev_priv->card_type >= NV_10 &&
- dev_priv->card_type < NV_50) {
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
- *old_tile = new_tile;
- }
- }
- static int
- nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct nouveau_tile_reg *new_tile = NULL;
- int ret = 0;
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
- if (ret)
- return ret;
- /* Fake bo copy. */
- if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
- BUG_ON(bo->mem.mm_node != NULL);
- bo->mem = *new_mem;
- new_mem->mm_node = NULL;
- goto out;
- }
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- goto out;
- }
- /* Hardware assisted copy. */
- if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- if (!ret)
- goto out;
- /* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- out:
- if (ret)
- nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
- else
- nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
- return ret;
- }
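- /* Summary of the move strategy implemented above: a system-memory bo with
-  * no ttm backend is simply relabelled; until the kernel channel exists all
-  * moves fall back to ttm_bo_move_memcpy(); otherwise moves to or from
-  * TTM_PL_SYSTEM are staged through a TT placement (the flipd/flips helpers)
-  * and everything else goes straight through the M2MF engine, with memcpy as
-  * the final fallback if the accelerated path fails.
-  */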
- static int
- nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
- {
- return 0;
- }
- static int
- nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
- {
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
- int ret;
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- return 0;
- case TTM_PL_TT:
- #if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = dev_priv->gart_info.aper_base;
- mem->bus.is_iomem = true;
- }
- #endif
- break;
- case TTM_PL_VRAM:
- {
- struct nouveau_vram *vram = mem->mm_node;
- u8 page_shift;
- if (!dev_priv->bar1_vm) {
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(dev->pdev, 1);
- mem->bus.is_iomem = true;
- break;
- }
- if (dev_priv->card_type == NV_C0)
- page_shift = vram->page_shift;
- else
- page_shift = 12;
- ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
- page_shift, NV_MEM_ACCESS_RW,
- &vram->bar_vma);
- if (ret)
- return ret;
- nouveau_vm_map(&vram->bar_vma, vram);
- if (ret) {
- nouveau_vm_put(&vram->bar_vma);
- return ret;
- }
- mem->bus.offset = vram->bar_vma.offset;
- if (dev_priv->card_type == NV_50) /*XXX*/
- mem->bus.offset -= 0x0020000000ULL;
- mem->bus.base = pci_resource_start(dev->pdev, 1);
- mem->bus.is_iomem = true;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
- }
- static void
- nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct nouveau_vram *vram = mem->mm_node;
- if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
- return;
- if (!vram->bar_vma.node)
- return;
- nouveau_vm_unmap(&vram->bar_vma);
- nouveau_vm_put(&vram->bar_vma);
- }
- static int
- nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
- {
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- /* as long as the bo isn't in vram, and isn't tiled, we've got
- * nothing to do here.
- */
- if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (dev_priv->card_type < NV_50 ||
- !nouveau_bo_tile_layout(nvbo))
- return 0;
- }
- /* make sure bo is in mappable vram */
- if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
- return 0;
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
- nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
- }
- void
- nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
- {
- struct nouveau_fence *old_fence;
- if (likely(fence))
- nouveau_fence_ref(fence);
- spin_lock(&nvbo->bo.bdev->fence_lock);
- old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
- spin_unlock(&nvbo->bo.bdev->fence_lock);
- nouveau_fence_unref(&old_fence);
- }
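- /* A hedged usage sketch for the helper above (the caller and the fence's
-  * origin are assumptions): after emitting GPU work that touches the buffer,
-  * a submitter could attach a fence so the sync_obj hooks below can wait on
-  * the buffer's last use:
-  *
-  *    ret = nouveau_fence_new(chan, &fence, true);
-  *    if (ret == 0) {
-  *        nouveau_bo_fence(nvbo, fence);
-  *        nouveau_fence_unref(&fence);
-  *    }
-  */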
- struct ttm_bo_driver nouveau_bo_driver = {
- .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
- .invalidate_caches = nouveau_bo_invalidate_caches,
- .init_mem_type = nouveau_bo_init_mem_type,
- .evict_flags = nouveau_bo_evict_flags,
- .move = nouveau_bo_move,
- .verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = __nouveau_fence_signalled,
- .sync_obj_wait = __nouveau_fence_wait,
- .sync_obj_flush = __nouveau_fence_flush,
- .sync_obj_unref = __nouveau_fence_unref,
- .sync_obj_ref = __nouveau_fence_ref,
- .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
- .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
- .io_mem_free = &nouveau_ttm_io_mem_free,
- };