@@ -3199,7 +3199,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_exec_object2 *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
 {
	struct drm_device *dev = obj->dev;
@@ -3207,12 +3207,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;
+	bool need_fence;
+
+	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		     obj_priv->tiling_mode != I915_TILING_NONE;
+
+	/* Check fence reg constraints and rebind if necessary */
+	if (need_fence && !i915_obj_fenceable(dev, obj))
+		i915_gem_object_unbind(obj);
 
	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;
 
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle blits to/from tiled surfaces.
+	 */
+	if (need_fence) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}
+
	entry->offset = obj_priv->gtt_offset;
 
	/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3374,7 +3397,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  */
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct drm_i915_gem_execbuffer *exec,
+			     struct drm_i915_gem_execbuffer2 *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
 {
@@ -3464,7 +3487,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
 {
@@ -3479,8 +3502,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
	}
 
	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL)
+	if (*relocs == NULL) {
+		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
		return -ENOMEM;
+	}
 
	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3504,7 +3529,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 }
 
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
 {
@@ -3537,7 +3562,7 @@ err:
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
			   uint64_t exec_offset)
 {
	uint32_t exec_start, exec_len;
@@ -3590,18 +3615,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 }
 
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec_list)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
-	int ret, ret2, i, pinned = 0;
+	int ret = 0, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries, flips;
@@ -3615,25 +3640,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (exec_list == NULL || object_list == NULL) {
-		DRM_ERROR("Failed to allocate exec or object list "
-			  "for %d buffers\n",
+	if (object_list == NULL) {
+		DRM_ERROR("Failed to allocate object list for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		goto pre_mutex_err;
-	}
 
	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3885,20 +3898,6 @@ err:
 
	mutex_unlock(&dev->struct_mutex);
 
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
	/* Copy the updated relocations out regardless of current error
	 * state. Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
@@ -3915,12 +3914,158 @@ err:
 
 pre_mutex_err:
	drm_free_large(object_list);
-	drm_free_large(exec_list);
	kfree(cliprects);
 
	return ret;
 }
 
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (!IS_I965G(dev))
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = 0;
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	} else {
+		DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
+
 int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
@@ -3934,19 +4079,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
		if (ret)
			return ret;
	}
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle tiled surfaces.
-	 */
-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
-			return ret;
-		}
-	}
+
	obj_priv->pin_count++;
 
	/* If the object is not active and not pending a flush,