@@ -405,30 +405,46 @@ static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 				    struct eb_objects *eb)
 {
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	int i, ret;
+	int remain, ret;
 
 	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
 
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc)))
+	remain = entry->relocation_count;
+	while (remain) {
+		struct drm_i915_gem_relocation_entry *r = stack_reloc;
+		int count = remain;
+		if (count > ARRAY_SIZE(stack_reloc))
+			count = ARRAY_SIZE(stack_reloc);
+		remain -= count;
+
+		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
 			return -EFAULT;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
-		if (ret)
-			return ret;
+		do {
+			u64 offset = r->presumed_offset;
 
-		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset)))
-			return -EFAULT;
+			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+			if (ret)
+				return ret;
+
+			if (r->presumed_offset != offset &&
+			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
+						    &r->presumed_offset,
+						    sizeof(r->presumed_offset))) {
+				return -EFAULT;
+			}
+
+			user_relocs++;
+			r++;
+		} while (--count);
 	}
 
 	return 0;
+#undef N_RELOC
 }
 
 static int
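The structural change above comes down to copying the user relocation array through a small on-stack buffer in chunks, and writing presumed_offset back only when relocation actually changed it. As a rough userspace illustration of that pattern (not the kernel code: memcpy() stands in for the __copy_*_user_inatomic() helpers, and relocate_fn is a hypothetical stand-in for i915_gem_execbuffer_relocate_entry()):

#include <stddef.h>
#include <string.h>

/* Simplified stand-in for struct drm_i915_gem_relocation_entry; only the
 * field the write-back cares about is kept. */
struct reloc_entry {
	unsigned long long presumed_offset;
	/* ... remaining relocation fields elided ... */
};

/* Hypothetical per-entry hook; returns 0 on success and may update
 * e->presumed_offset. */
typedef int (*relocate_fn)(struct reloc_entry *e);

/* Walk 'count' entries in fixed-size chunks through a small stack buffer,
 * mirroring the new while/do-while structure in the diff. */
static int relocate_in_chunks(struct reloc_entry *user, size_t count,
			      relocate_fn fn)
{
	struct reloc_entry stack[512 / sizeof(struct reloc_entry)];
	size_t remain = count;

	while (remain) {
		size_t n = remain, i;
		int ret;

		if (n > sizeof(stack) / sizeof(stack[0]))
			n = sizeof(stack) / sizeof(stack[0]);
		remain -= n;

		/* One bulk copy per chunk instead of one copy per entry. */
		memcpy(stack, user, n * sizeof(stack[0]));

		for (i = 0; i < n; i++) {
			unsigned long long old = stack[i].presumed_offset;

			ret = fn(&stack[i]);
			if (ret)
				return ret;

			/* Write back only the entries whose offset changed. */
			if (stack[i].presumed_offset != old)
				memcpy(&user[i].presumed_offset,
				       &stack[i].presumed_offset,
				       sizeof(user[i].presumed_offset));
		}

		user += n;
	}

	return 0;
}

The stack buffer is sized so one chunk fits in 512 bytes, which is why the diff caps count at ARRAY_SIZE(stack_reloc) before each bulk copy.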