@@ -636,6 +636,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
+	int *reloc_offset;
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
@@ -653,8 +654,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	for (i = 0; i < count; i++)
 		total += exec[i].relocation_count;
 
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
@@ -672,6 +676,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
 
@@ -705,17 +710,14 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	total = 0;
 	list_for_each_entry(obj, objects, exec_list) {
+		int offset = obj->exec_entry - exec;
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + total);
+							       reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
-
-		total += exec->relocation_count;
-		exec++;
 	}
 
 	/* Leave the user relocations as are, this is the painfully slow path,
@@ -726,6 +728,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 err:
 	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
 	return ret;
 }
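
Note on the change (explanatory sketch, not part of the patch): the slow path previously walked the bound-object list with a running "total" cursor and a post-incremented exec pointer, which only works if the list is visited in the same order as the exec[] array. Recording each entry's starting offset into the flat relocation array up front, and looking it up via obj->exec_entry - exec, keeps the lookup correct regardless of list order. A minimal standalone sketch of that prefix-sum indexing follows; the names struct exec_entry and build_reloc_offsets are illustrative only and do not appear in the patch:

/*
 * Build per-entry start offsets into one flat relocation array.
 * reloc_offset[i] records where entry i's relocations begin, so a
 * consumer can index by an entry's position in exec[] rather than
 * by whatever order it happens to visit the objects in.
 */
struct exec_entry { unsigned int relocation_count; };

static void build_reloc_offsets(const struct exec_entry *exec,
				int *reloc_offset, int count)
{
	int i, total = 0;

	for (i = 0; i < count; i++) {
		reloc_offset[i] = total;           /* entry i starts here */
		total += exec[i].relocation_count; /* running prefix sum  */
	}
}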