@@ -367,6 +367,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		uint32_t __iomem *reloc_entry;
 		void __iomem *reloc_page;
 
+		/* We can't wait for rendering with pagefaults disabled */
+		if (obj->active && in_atomic())
+			return -EFAULT;
+
 		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 		if (ret)
 			return ret;
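The new check bails out of the relocation with -EFAULT rather than waiting: i915_gem_object_set_to_gtt_domain() may block on the GPU for an active object, and the fast path in the next hunk runs under pagefault_disable(), where blocking is not allowed. The error propagates out so the caller can retry with pagefaults enabled. A minimal sketch of that caller-side fallback, assuming a simplified signature for the slow-path helper (the full patch adds it outside this excerpt; the call shape here is illustrative, not the driver's exact code):

	/* Sketch only: the helper signature is an assumption, not the
	 * exact code added elsewhere in this patch.
	 */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret == -EFAULT) {
		/* The fast path hit a non-resident user page or an
		 * active object: copy the relocations into kernel
		 * memory with pagefaults allowed, then relocate again.
		 */
		ret = i915_gem_execbuffer_relocate_slow(dev, eb, &objects);
	}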
@@ -440,15 +444,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 			     struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	int ret;
-
+	int ret = 0;
+
+	/* This is the fast path and we cannot handle a pagefault whilst
+	 * holding the struct mutex lest the user pass in the relocations
+	 * contained within an mmapped bo. For in such a case, the page
+	 * fault handler would call i915_gem_fault() and we would try to
+	 * acquire the struct mutex again. Obviously this is bad and so
+	 * lockdep complains vehemently.
+	 */
+	pagefault_disable();
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_execbuffer_relocate_object(obj, eb);
 		if (ret)
-			return ret;
+			break;
 	}
+	pagefault_enable();
 
-	return 0;
+	return ret;
 }
 
 static int
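On kernels of this era pagefault_disable() bumps the preempt count, which is what makes the in_atomic() test in the first hunk fire while this fast path is running. It also means the user-supplied relocation array must be read with the non-faulting uaccess variants, which fail instead of entering the fault handler. A simplified sketch of the pattern, with a hypothetical helper name (not the driver's exact code):

#include <linux/uaccess.h>
#include <drm/i915_drm.h>

/* copy_reloc_fast: hypothetical helper illustrating the non-faulting
 * copy used on the fast path.
 */
static int
copy_reloc_fast(struct drm_i915_gem_relocation_entry *reloc,
		const void __user *user_relocs)
{
	int ret = 0;

	pagefault_disable();
	/* __copy_from_user_inatomic() never sleeps: if the user page is
	 * not resident it returns nonzero instead of faulting it in,
	 * and we report -EFAULT so the caller can take the slow path.
	 */
	if (__copy_from_user_inatomic(reloc, user_relocs, sizeof(*reloc)))
		ret = -EFAULT;
	pagefault_enable();

	return ret;
}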