@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
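
For reference, an importing driver never calls these ops directly; it goes through the dma-buf core's dma_buf_vmap()/dma_buf_vunmap() wrappers, which dispatch to the callbacks added above. A minimal importer-side sketch follows; the function name and surrounding driver context are hypothetical, not part of this patch:

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	/* Hypothetical importer: map an exported i915 object into the
	 * kernel's address space for CPU access. dma_buf_vmap() lands in
	 * i915_gem_dmabuf_vmap(), which pins the backing pages and
	 * refcounts the vmap().
	 */
	static int example_cpu_access(struct dma_buf *buf)
	{
		void *vaddr;

		vaddr = dma_buf_vmap(buf);
		if (IS_ERR_OR_NULL(vaddr))
			return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

		/* ... read or write the buffer through vaddr ... */

		/* Drop our reference; i915_gem_dmabuf_vunmap() only tears
		 * down the mapping once vmapping_count reaches zero.
		 */
		dma_buf_vunmap(buf, vaddr);
		return 0;
	}

Note the IS_ERR_OR_NULL() check: i915_gem_dmabuf_vmap() returns an ERR_PTR() when locking or page allocation fails, but plain NULL when vmap() itself fails, so a careful importer handles both. Also note that .mmap is wired up only as a stub returning -EINVAL, so userspace mmap() of an exported i915 buffer is still rejected; this patch implements the kernel-side vmap interface only.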