|
@@ -53,7 +53,8 @@
|
|
|
* Self-importing: if userspace is using PRIME as a replacement for flink
|
|
|
* then it will get a fd->handle request for a GEM object that it created.
|
|
|
* Drivers should detect this situation and return back the gem object
|
|
|
- * from the dma-buf private.
|
|
|
+ * from the dma-buf private. Prime will do this automatically for drivers that
|
|
|
+ * use the drm_gem_prime_{import,export} helpers.
|
|
|
*/
|
|
|
|
|
|
struct drm_prime_member {
|
|
@@ -62,6 +63,137 @@ struct drm_prime_member {
|
|
|
uint32_t handle;
|
|
|
};
|
|
|
|
|
|
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
|
|
|
+ enum dma_data_direction dir)
|
|
|
+{
|
|
|
+ struct drm_gem_object *obj = attach->dmabuf->priv;
|
|
|
+ struct sg_table *sgt;
|
|
|
+
|
|
|
+ mutex_lock(&obj->dev->struct_mutex);
|
|
|
+
|
|
|
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
|
|
|
+
|
|
|
+ if (!IS_ERR_OR_NULL(sgt))
|
|
|
+ dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
|
|
|
+
|
|
|
+ mutex_unlock(&obj->dev->struct_mutex);
|
|
|
+ return sgt;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * drm_gem_unmap_dma_buf - dma-buf unmap_dma_buf callback
 *
 * Undoes drm_gem_map_dma_buf(): unmaps the scatterlist from the importing
 * device and frees the sg_table that the driver's gem_prime_get_sg_table()
 * hook allocated.
 */
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}
|
|
|
+
|
|
|
/*
 * drm_gem_dmabuf_release - dma-buf release callback
 *
 * Drops the GEM object reference that was being held on behalf of the
 * exported dma-buf, and clears the export cache so a future export
 * creates a fresh dma-buf.
 */
static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the export fd held */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}
|
|
|
+
|
|
|
/*
 * drm_gem_dmabuf_vmap - dma-buf vmap callback
 *
 * Delegates kernel virtual mapping of the buffer to the exporting driver.
 * NOTE(review): this dereferences gem_prime_vmap unconditionally — a driver
 * using these helpers without providing the hook would oops here; confirm
 * all helper users set it.
 */
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}
|
|
|
+
|
|
|
/*
 * drm_gem_dmabuf_vunmap - dma-buf vunmap callback
 *
 * Delegates teardown of a drm_gem_dmabuf_vmap() mapping to the exporting
 * driver. NOTE(review): like the vmap path, assumes the driver provides
 * the gem_prime_vunmap hook — verify against helper users.
 */
static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}
|
|
|
+
|
|
|
/*
 * CPU-access stubs: the PRIME helpers do not implement CPU access to the
 * buffer, so the kmap variants report "not available" by returning NULL,
 * the kunmap variants are no-ops, and mmap rejects the request with
 * -EINVAL.
 */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}
|
|
|
+
|
|
|
/*
 * dma_buf_ops vtable shared by every dma-buf exported through the GEM
 * PRIME helpers; the per-object state lives in dma_buf->priv (the GEM
 * object).
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
|
|
|
+
|
|
|
+/**
|
|
|
+ * DOC: PRIME Helpers
|
|
|
+ *
|
|
|
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
|
|
|
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
|
|
|
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
|
|
|
+ * five lower-level driver callbacks:
|
|
|
+ *
|
|
|
+ * Export callbacks:
|
|
|
+ *
|
|
|
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
|
|
|
+ *
|
|
|
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
|
|
|
+ *
|
|
|
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
|
|
|
+ *
|
|
|
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
|
|
|
+ *
|
|
|
+ * Import callback:
|
|
|
+ *
|
|
|
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
|
|
|
+ * driver's scatter/gather table
|
|
|
+ */
|
|
|
+
|
|
|
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
|
|
|
+ struct drm_gem_object *obj, int flags)
|
|
|
+{
|
|
|
+ if (dev->driver->gem_prime_pin) {
|
|
|
+ int ret = dev->driver->gem_prime_pin(obj);
|
|
|
+ if (ret)
|
|
|
+ return ERR_PTR(ret);
|
|
|
+ }
|
|
|
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
|
|
|
+ 0600);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(drm_gem_prime_export);
|
|
|
+
|
|
|
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
|
|
|
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
|
|
|
int *prime_fd)
|
|
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
|
|
|
}
|
|
|
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
|
|
|
|
|
|
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
|
|
|
+ struct dma_buf *dma_buf)
|
|
|
+{
|
|
|
+ struct dma_buf_attachment *attach;
|
|
|
+ struct sg_table *sgt;
|
|
|
+ struct drm_gem_object *obj;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ if (!dev->driver->gem_prime_import_sg_table)
|
|
|
+ return ERR_PTR(-EINVAL);
|
|
|
+
|
|
|
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
|
|
|
+ obj = dma_buf->priv;
|
|
|
+ if (obj->dev == dev) {
|
|
|
+ /*
|
|
|
+ * Importing dmabuf exported from out own gem increases
|
|
|
+ * refcount on gem itself instead of f_count of dmabuf.
|
|
|
+ */
|
|
|
+ drm_gem_object_reference(obj);
|
|
|
+ dma_buf_put(dma_buf);
|
|
|
+ return obj;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ attach = dma_buf_attach(dma_buf, dev->dev);
|
|
|
+ if (IS_ERR(attach))
|
|
|
+ return ERR_PTR(PTR_ERR(attach));
|
|
|
+
|
|
|
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
|
|
|
+ if (IS_ERR_OR_NULL(sgt)) {
|
|
|
+ ret = PTR_ERR(sgt);
|
|
|
+ goto fail_detach;
|
|
|
+ }
|
|
|
+
|
|
|
+ obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
|
|
|
+ if (IS_ERR(obj)) {
|
|
|
+ ret = PTR_ERR(obj);
|
|
|
+ goto fail_unmap;
|
|
|
+ }
|
|
|
+
|
|
|
+ obj->import_attach = attach;
|
|
|
+
|
|
|
+ return obj;
|
|
|
+
|
|
|
+fail_unmap:
|
|
|
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
|
|
|
+fail_detach:
|
|
|
+ dma_buf_detach(dma_buf, attach);
|
|
|
+ return ERR_PTR(ret);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(drm_gem_prime_import);
|
|
|
+
|
|
|
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
|
|
|
struct drm_file *file_priv, int prime_fd, uint32_t *handle)
|
|
|
{
|