@@ -34,74 +34,32 @@
 #include "radeon_drm.h"
 #include "radeon.h"

-struct radeon_object {
-	struct ttm_buffer_object	tobj;
-	struct list_head		list;
-	struct radeon_device		*rdev;
-	struct drm_gem_object		*gobj;
-	struct ttm_bo_kmap_obj		kmap;
-	unsigned			pin_count;
-	uint64_t			gpu_addr;
-	void				*kptr;
-	bool				is_iomem;
-	uint32_t			tiling_flags;
-	uint32_t			pitch;
-	int				surface_reg;
-};
-
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

 /*
  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
  * function are calling it.
  */

-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
-	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
+	struct radeon_bo *bo;

-static void radeon_object_unreserve(struct radeon_object *robj)
-{
-	ttm_bo_unreserve(&robj->tobj);
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	kfree(bo);
 }

-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 {
-	struct radeon_object *robj;
-
-	robj = container_of(tobj, struct radeon_object, tobj);
-	list_del_init(&robj->list);
-	radeon_object_clear_surface_reg(robj);
-	kfree(robj);
-}
-
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
-{
-	/* Default gpu address */
-	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-	if (robj->tobj.mem.mm_node == NULL) {
-		return;
-	}
-	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
-	switch (robj->tobj.mem.mem_type) {
-	case TTM_PL_VRAM:
-		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
-		break;
-	case TTM_PL_TT:
-		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
-		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-		return;
-	}
-}
+	u32 flags = 0;

-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
-	uint32_t flags = 0;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
 		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 	}
@@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
 	return flags;
 }

-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+			unsigned long size, bool kernel, u32 domain,
+			struct radeon_bo **bo_ptr)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	uint32_t flags;
+	u32 flags;
 	int r;

 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,207 +92,140 @@ int radeon_object_create(struct radeon_device *rdev,
 	} else {
 		type = ttm_bo_type_device;
 	}
-	*robj_ptr = NULL;
-	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
-	if (robj == NULL) {
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
 		return -ENOMEM;
-	}
-	robj->rdev = rdev;
-	robj->gobj = gobj;
-	robj->surface_reg = -1;
-	INIT_LIST_HEAD(&robj->list);
-
-	flags = radeon_object_flags_from_domain(domain);
-	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
-				   0, 0, false, NULL, size,
-				   &radeon_ttm_object_object_destroy);
+	bo->rdev = rdev;
+	bo->gobj = gobj;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+
+	flags = radeon_ttm_flags_from_domain(domain);
+retry:
+	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
+					flags, 0, 0, true, NULL, size,
+					&radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
 		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
-			  size, flags, 0);
+		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
+			size, flags);
 		return r;
 	}
-	*robj_ptr = robj;
+	*bo_ptr = bo;
 	if (gobj) {
-		list_add_tail(&robj->list, &rdev->gem.objects);
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_add_tail(&bo->list, &rdev->gem.objects);
+		mutex_unlock(&bo->rdev->gem.mutex);
 	}
 	return 0;
 }

-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
+	bool is_iomem;
 	int r;

-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr) {
+	if (bo->kptr) {
 		if (ptr) {
-			*ptr = robj->kptr;
+			*ptr = bo->kptr;
 		}
-		spin_unlock(&robj->tobj.lock);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
-	spin_unlock(&robj->tobj.lock);
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr) {
-		*ptr = robj->kptr;
+		*ptr = bo->kptr;
 	}
-	radeon_object_check_tiling(robj, 0, 0);
+	radeon_bo_check_tiling(bo, 0, 0);
 	return 0;
 }

-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
 {
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr == NULL) {
-		spin_unlock(&robj->tobj.lock);
+	if (bo->kptr == NULL)
 		return;
-	}
-	robj->kptr = NULL;
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_check_tiling(robj, 0, 0);
-	ttm_bo_kunmap(&robj->kmap);
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
 }

-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
 {
-	struct ttm_buffer_object *tobj;
+	struct ttm_buffer_object *tbo;

-	if ((*robj) == NULL) {
+	if ((*bo) == NULL)
 		return;
-	}
-	tobj = &((*robj)->tobj);
-	ttm_bo_unref(&tobj);
-	if (tobj == NULL) {
-		*robj = NULL;
-	}
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
-	*offset = robj->tobj.addr_space_offset;
-	return 0;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
 }

-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	uint32_t flags;
-	uint32_t tmp;
+	u32 flags;
+	u32 tmp;
 	int r;

-	flags = radeon_object_flags_from_domain(domain);
-	spin_lock(&robj->tobj.lock);
-	if (robj->pin_count) {
-		robj->pin_count++;
-		if (gpu_addr != NULL) {
-			*gpu_addr = robj->gpu_addr;
-		}
-		spin_unlock(&robj->tobj.lock);
+	flags = radeon_ttm_flags_from_domain(domain);
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
-		return r;
-	}
-	tmp = robj->tobj.mem.placement;
+	tmp = bo->tbo.mem.placement;
 	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	radeon_object_gpu_addr(robj);
-	if (gpu_addr != NULL) {
-		*gpu_addr = robj->gpu_addr;
+	bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
+					TTM_PL_MASK_CACHING;
+retry:
+	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+					true, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	robj->pin_count = 1;
 	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to pin object.\n");
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
 	}
-	radeon_object_unreserve(robj);
 	return r;
 }

-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	uint32_t flags;
 	int r;

-	spin_lock(&robj->tobj.lock);
-	if (!robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
-		return;
-	}
-	robj->pin_count--;
-	if (robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		return;
-	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
-		return;
-	}
-	flags = robj->tobj.mem.placement;
-	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to unpin buffer.\n");
-	}
-	radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
-	int r = 0;
-
-	/* FIXME: should use block reservation instead */
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, false);
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	radeon_hdp_flush(robj->rdev);
-	return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
-	int r = 0;
-
-	r = radeon_object_reserve(robj, true);
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	bo->tbo.proposed_placement = bo->tbo.mem.placement &
+					~TTM_PL_FLAG_NO_EVICT;
+retry:
+	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+					true, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	*cur_placement = robj->tobj.mem.mem_type;
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, true);
-	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	return r;
+	return 0;
 }

-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* Useless to evict on IGP chips */
@@ -347,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }

-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
 {
-	struct radeon_object *robj, *n;
+	struct radeon_bo *bo, *n;
 	struct drm_gem_object *gobj;

 	if (list_empty(&rdev->gem.objects)) {
 		return;
 	}
-	DRM_ERROR("Userspace still has active objects !\n");
-	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = robj->gobj;
-		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
-			  gobj, robj, (unsigned long)gobj->size,
-			  *((unsigned long *)&gobj->refcount));
-		list_del_init(&robj->list);
-		radeon_object_unref(&robj);
+		gobj = bo->gobj;
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			gobj, bo, (unsigned long)gobj->size,
+			*((unsigned long *)&gobj->refcount));
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		radeon_bo_unref(&bo);
 		gobj->driver_private = NULL;
 		drm_gem_object_unreference(gobj);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }

-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
 {
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -383,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev)
 	return radeon_ttm_init(rdev);
 }

-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 }

-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head)
 {
 	if (lobj->wdomain) {
 		list_add(&lobj->list, head);
@@ -398,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
 	}
 }

-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 	int r;

 	list_for_each_entry(lobj, head, list){
-		if (!lobj->robj->pin_count) {
-			r = radeon_object_reserve(lobj->robj, true);
-			if (unlikely(r != 0)) {
-				DRM_ERROR("radeon: failed to reserve object.\n");
-				return r;
-			}
-		} else {
-		}
+		r = radeon_bo_reserve(lobj->bo, false);
+		if (unlikely(r != 0))
+			return r;
 	}
 	return 0;
 }

-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;

 	list_for_each_entry(lobj, head, list) {
-		if (!lobj->robj->pin_count) {
-			radeon_object_unreserve(lobj->robj);
-		}
+		/* only unreserve object we successfully reserved */
+		if (radeon_bo_is_reserved(lobj->bo))
+			radeon_bo_unreserve(lobj->bo);
 	}
 }

-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_object *robj;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
 	struct radeon_fence *old_fence = NULL;
 	int r;

-	r = radeon_object_list_reserve(head);
+	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
-		radeon_object_list_unreserve(head);
 		return r;
 	}
 	list_for_each_entry(lobj, head, list) {
-		robj = lobj->robj;
-		if (!robj->pin_count) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->wdomain);
+				bo->tbo.proposed_placement =
+					radeon_ttm_flags_from_domain(lobj->wdomain);
 			} else {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->rdomain);
+				bo->tbo.proposed_placement =
+					radeon_ttm_flags_from_domain(lobj->rdomain);
 			}
-			r = ttm_buffer_object_validate(&robj->tobj,
-						       robj->tobj.proposed_placement,
-						       true, false);
+retry:
+			r = ttm_buffer_object_validate(&bo->tbo,
+						bo->tbo.proposed_placement,
+						true, false);
 			if (unlikely(r)) {
-				DRM_ERROR("radeon: failed to validate.\n");
+				if (r == -ERESTART)
+					goto retry;
 				return r;
 			}
-			radeon_object_gpu_addr(robj);
 		}
-		lobj->gpu_offset = robj->gpu_addr;
-		lobj->tiling_flags = robj->tiling_flags;
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
 		if (fence) {
-			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
-			robj->tobj.sync_obj = radeon_fence_ref(fence);
-			robj->tobj.sync_obj_arg = NULL;
+			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+			bo->tbo.sync_obj = radeon_fence_ref(fence);
+			bo->tbo.sync_obj_arg = NULL;
 		}
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
@@ -472,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	return 0;
 }

-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_fence *old_fence = NULL;
+	struct radeon_bo_list *lobj;
+	struct radeon_fence *old_fence;

-	list_for_each_entry(lobj, head, list) {
-		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
-		lobj->robj->tobj.sync_obj = NULL;
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
+	if (fence)
+		list_for_each_entry(lobj, head, list) {
+			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+			if (old_fence == fence) {
+				lobj->bo->tbo.sync_obj = NULL;
+				radeon_fence_unref(&old_fence);
+			}
 		}
-	}
-	radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
-	radeon_object_list_unreserve(head);
+	radeon_bo_list_unreserve(head);
 }

-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
 {
-	return ttm_fbdev_mmap(vma, &robj->tobj);
+	return ttm_fbdev_mmap(vma, &bo->tbo);
 }

-unsigned long radeon_object_size(struct radeon_object *robj)
+static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
-	return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
-	struct radeon_object *old_object;
+	struct radeon_bo *old_object;
 	int steal;
 	int i;

-	if (!robj->tiling_flags)
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!bo->tiling_flags)
 		return 0;

-	if (robj->surface_reg >= 0) {
-		reg = &rdev->surface_regs[robj->surface_reg];
-		i = robj->surface_reg;
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
 		goto out;
 	}

@@ -524,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

 		reg = &rdev->surface_regs[i];
-		if (!reg->robj)
+		if (!reg->bo)
 			break;

-		old_object = reg->robj;
+		old_object = reg->bo;
 		if (old_object->pin_count == 0)
 			steal = i;
 	}
@@ -538,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 			return -ENOMEM;
 		/* find someone with a surface reg and nuke their BO */
 		reg = &rdev->surface_regs[steal];
-		old_object = reg->robj;
+		old_object = reg->bo;
 		/* blow away the mapping */
 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
-		ttm_bo_unmap_virtual(&old_object->tobj);
+		ttm_bo_unmap_virtual(&old_object->tbo);
 		old_object->surface_reg = -1;
 		i = steal;
 	}

-	robj->surface_reg = i;
-	reg->robj = robj;
+	bo->surface_reg = i;
+	reg->bo = bo;

 out:
-	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
-			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
-			       robj->tobj.num_pages << PAGE_SHIFT);
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }

-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
 {
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;

-	if (robj->surface_reg == -1)
+	if (bo->surface_reg == -1)
 		return;

-	reg = &rdev->surface_regs[robj->surface_reg];
-	radeon_clear_surface_reg(rdev, robj->surface_reg);
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);

-	reg->robj = NULL;
-	robj->surface_reg = -1;
+	reg->bo = NULL;
+	bo->surface_reg = -1;
 }

-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
 {
-	robj->tiling_flags = tiling_flags;
-	robj->pitch = pitch;
+	int r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
 }

-void radeon_object_get_tiling_flags(struct radeon_object *robj,
-				    uint32_t *tiling_flags,
-				    uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
 {
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
 	if (tiling_flags)
-		*tiling_flags = robj->tiling_flags;
+		*tiling_flags = bo->tiling_flags;
 	if (pitch)
-		*pitch = robj->pitch;
+		*pitch = bo->pitch;
 }

-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
 {
-	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;

 	if (force_drop) {
-		radeon_object_clear_surface_reg(robj);
+		radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}

-	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;

-		if (robj->surface_reg >= 0)
-			radeon_object_clear_surface_reg(robj);
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}

-	if ((robj->surface_reg >= 0) && !has_moved)
+	if ((bo->surface_reg >= 0) && !has_moved)
 		return 0;

-	return radeon_object_get_surface_reg(robj);
+	return radeon_bo_get_surface_reg(bo);
 }

 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem)
+				struct ttm_mem_reg *mem)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 1);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
 }

 void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 0);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
 }
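
Note: the patch calls several radeon_bo_* helpers (radeon_bo_reserve, radeon_bo_unreserve, radeon_bo_is_reserved, radeon_bo_gpu_offset) that are not defined in this file; in this rework they would live as static inlines in the companion radeon_object.h header, which is not part of this hunk. A minimal sketch of what they are assumed to look like, relying on TTM to maintain tbo.offset after validation and tbo.reserved as an atomic (as the BUG_ON checks above suggest):

	static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
	{
		int r;

	retry:
		/* interruptible reserve; retry on signal, like the callers above */
		r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
		if (unlikely(r != 0)) {
			if (r == -ERESTART)
				goto retry;
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
			return r;
		}
		return 0;
	}

	static inline void radeon_bo_unreserve(struct radeon_bo *bo)
	{
		ttm_bo_unreserve(&bo->tbo);
	}

	static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
	{
		return !!atomic_read(&bo->tbo.reserved);
	}

	static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
	{
		/* TTM keeps the GPU offset current after each validate, which
		 * is what lets this rework drop the cached gpu_addr field and
		 * the removed radeon_object_gpu_addr() helper. */
		return bo->tbo.offset;
	}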
|