@@ -28,199 +28,71 @@
 #include "radeon.h"
 #include <drm/radeon_drm.h>
 
-#include <linux/dma-buf.h>
-
-static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
-                                               enum dma_data_direction dir)
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
-        struct radeon_bo *bo = attachment->dmabuf->priv;
-        struct drm_device *dev = bo->rdev->ddev;
+        struct radeon_bo *bo = gem_to_radeon_bo(obj);
         int npages = bo->tbo.num_pages;
-        struct sg_table *sg;
-        int nents;
-
-        mutex_lock(&dev->struct_mutex);
-        sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
-        mutex_unlock(&dev->struct_mutex);
-        return sg;
-}
-
-static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-                                     struct sg_table *sg, enum dma_data_direction dir)
-{
-        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
-        sg_free_table(sg);
-        kfree(sg);
-}
-
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
-        struct radeon_bo *bo = dma_buf->priv;
-
-        if (bo->gem_base.export_dma_buf == dma_buf) {
-                DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
-                bo->gem_base.export_dma_buf = NULL;
-                drm_gem_object_unreference_unlocked(&bo->gem_base);
-        }
-}
-
-static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-        return NULL;
-}
-
-static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
-static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-        return NULL;
-}
-
-static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
+        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
 }
 
-static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
 {
-        return -EINVAL;
-}
-
-static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
-{
-        struct radeon_bo *bo = dma_buf->priv;
-        struct drm_device *dev = bo->rdev->ddev;
+        struct radeon_bo *bo = gem_to_radeon_bo(obj);
         int ret;
 
-        mutex_lock(&dev->struct_mutex);
-        if (bo->vmapping_count) {
-                bo->vmapping_count++;
-                goto out_unlock;
-        }
-
         ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
                           &bo->dma_buf_vmap);
-        if (ret) {
-                mutex_unlock(&dev->struct_mutex);
+        if (ret)
                 return ERR_PTR(ret);
-        }
-        bo->vmapping_count = 1;
-out_unlock:
-        mutex_unlock(&dev->struct_mutex);
+
         return bo->dma_buf_vmap.virtual;
 }
 
-static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-        struct radeon_bo *bo = dma_buf->priv;
-        struct drm_device *dev = bo->rdev->ddev;
+        struct radeon_bo *bo = gem_to_radeon_bo(obj);
 
-        mutex_lock(&dev->struct_mutex);
-        bo->vmapping_count--;
-        if (bo->vmapping_count == 0) {
-                ttm_bo_kunmap(&bo->dma_buf_vmap);
-        }
-        mutex_unlock(&dev->struct_mutex);
+        ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
-const static struct dma_buf_ops radeon_dmabuf_ops = {
-        .map_dma_buf = radeon_gem_map_dma_buf,
-        .unmap_dma_buf = radeon_gem_unmap_dma_buf,
-        .release = radeon_gem_dmabuf_release,
-        .kmap = radeon_gem_kmap,
-        .kmap_atomic = radeon_gem_kmap_atomic,
-        .kunmap = radeon_gem_kunmap,
-        .kunmap_atomic = radeon_gem_kunmap_atomic,
-        .mmap = radeon_gem_prime_mmap,
-        .vmap = radeon_gem_prime_vmap,
-        .vunmap = radeon_gem_prime_vunmap,
-};
-
-static int radeon_prime_create(struct drm_device *dev,
-                               size_t size,
-                               struct sg_table *sg,
-                               struct radeon_bo **pbo)
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+                                                        size_t size,
+                                                        struct sg_table *sg)
 {
         struct radeon_device *rdev = dev->dev_private;
         struct radeon_bo *bo;
         int ret;
 
         ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
-                               RADEON_GEM_DOMAIN_GTT, sg, pbo);
+                               RADEON_GEM_DOMAIN_GTT, sg, &bo);
         if (ret)
-                return ret;
-        bo = *pbo;
+                return ERR_PTR(ret);
         bo->gem_base.driver_private = bo;
 
         mutex_lock(&rdev->gem.mutex);
         list_add_tail(&bo->list, &rdev->gem.objects);
         mutex_unlock(&rdev->gem.mutex);
 
-        return 0;
+        return &bo->gem_base;
 }
 
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
-                                        struct drm_gem_object *obj,
-                                        int flags)
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
 {
         struct radeon_bo *bo = gem_to_radeon_bo(obj);
         int ret = 0;
 
         ret = radeon_bo_reserve(bo, false);
         if (unlikely(ret != 0))
-                return ERR_PTR(ret);
+                return ret;
 
         /* pin buffer into GTT */
         ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
         if (ret) {
                 radeon_bo_unreserve(bo);
-                return ERR_PTR(ret);
+                return ret;
         }
         radeon_bo_unreserve(bo);
-        return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
-}
-
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
-                                               struct dma_buf *dma_buf)
-{
-        struct dma_buf_attachment *attach;
-        struct sg_table *sg;
-        struct radeon_bo *bo;
-        int ret;
-
-        if (dma_buf->ops == &radeon_dmabuf_ops) {
-                bo = dma_buf->priv;
-                if (bo->gem_base.dev == dev) {
-                        drm_gem_object_reference(&bo->gem_base);
-                        dma_buf_put(dma_buf);
-                        return &bo->gem_base;
-                }
-        }
-
-        /* need to attach */
-        attach = dma_buf_attach(dma_buf, dev->dev);
-        if (IS_ERR(attach))
-                return ERR_CAST(attach);
-
-        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-        if (IS_ERR(sg)) {
-                ret = PTR_ERR(sg);
-                goto fail_detach;
-        }
-
-        ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
-        if (ret)
-                goto fail_unmap;
-
-        bo->gem_base.import_attach = attach;
-
-        return &bo->gem_base;
-
-fail_unmap:
-        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-        dma_buf_detach(dma_buf, attach);
-        return ERR_PTR(ret);
+        return 0;
 }
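
Deleting the driver-local dma_buf_ops table is only safe because the now-exported
radeon_gem_prime_* functions become callbacks for the generic PRIME helpers in
drm_prime.c, which supply the map/unmap/release/mmap plumbing. The radeon_drv.c
hunk that does this wiring is not part of this excerpt; below is a minimal sketch
of what it would look like, using the struct drm_driver PRIME fields of this
kernel generation, with the unrelated radeon callbacks elided:

        static struct drm_driver kms_driver = {
                /* DRIVER_PRIME advertises PRIME support to the DRM core */
                .driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,
                /* generic entry points provided by drm_prime.c */
                .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
                .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
                .gem_prime_export = drm_gem_prime_export,
                .gem_prime_import = drm_gem_prime_import,
                /* driver hooks implemented in radeon_prime.c above */
                .gem_prime_pin = radeon_gem_prime_pin,
                .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
                .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
                .gem_prime_vmap = radeon_gem_prime_vmap,
                .gem_prime_vunmap = radeon_gem_prime_vunmap,
                /* ... remaining radeon callbacks ... */
        };

With this wiring, drm_gem_prime_export() builds the dma-buf around the sg table
returned by radeon_gem_prime_get_sg_table(), and drm_gem_prime_import() calls
radeon_gem_prime_import_sg_table() for foreign buffers, replacing the
hand-rolled attach/map logic removed above.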