@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	total = 0;
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;
 
 		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
 
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (copy_to_user(&user_relocs[j].presumed_offset,
+					 &invalid_offset,
+					 sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
 		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
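
For context (not part of the patch itself): the hunk relies on (u64)-1 never
being a real GTT offset, so a poisoned presumed_offset always fails the
"still valid?" comparison and forces relocation processing on the next
execbuffer. Below is a minimal standalone sketch of that sentinel-invalidation
pattern; all names are hypothetical stand-ins for the i915 structures, not
the driver's actual code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_OFFSET ((uint64_t)-1)	/* same sentinel as the patch */

/* Hypothetical stand-in for drm_i915_gem_relocation_entry. */
struct reloc_entry {
	uint64_t presumed_offset;	/* cached guess at the object's offset */
};

/* Reprocess unless the cached guess provably matches reality.
 * INVALID_OFFSET can never equal a real offset, so invalidated
 * entries are always reprocessed - the property the patch relies
 * on along the slow path.
 */
static bool needs_relocation(const struct reloc_entry *reloc,
			     uint64_t current_offset)
{
	return reloc->presumed_offset != current_offset;
}

int main(void)
{
	struct reloc_entry reloc = { .presumed_offset = 0x10000 };

	/* Object still where it was presumed: processing may be skipped. */
	printf("match:       needs_relocation=%d\n",
	       needs_relocation(&reloc, 0x10000));

	/* Slow path ran: poison the cached offset as the patch does. */
	reloc.presumed_offset = INVALID_OFFSET;

	/* Even if the object is evicted and rebound to its old address,
	 * the sentinel forces reprocessing instead of a false "valid".
	 */
	printf("invalidated: needs_relocation=%d\n",
	       needs_relocation(&reloc, 0x10000));
	return 0;
}

Using an impossible value as the invalidation marker avoids extra "dirty"
bookkeeping in the kernel, at the cost of one copy_to_user() per relocation
entry along the already-slow path.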