
drm/i915: Clear the gpu_write_list on resetting write_domain upon hang

Otherwise we will hit a list handling assertion when moving the object
to the inactive list.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
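The assertion in question is the list consistency check performed when an object is retired to the inactive list: the object must already be off the device's gpu_write_list, or the check trips (an assumed shape of the check; i915_gem_object_move_to_inactive() itself is not part of this diff). A minimal userspace sketch of the failure mode, with simplified stand-ins for the kernel's list helpers and the i915 bookkeeping lists:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for include/linux/list.h. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

/* Hypothetical, pared-down object: one link for the
 * active/flushing/inactive lists, one for the gpu_write_list. */
struct fake_obj {
	struct list_head list;
	struct list_head gpu_write_list;
	unsigned int write_domain;
};

/* Stand-in for i915_gem_object_move_to_inactive(): objects with
 * pending GPU writes must not be retired to the inactive list. */
static void move_to_inactive(struct fake_obj *obj, struct list_head *inactive)
{
	assert(list_empty(&obj->gpu_write_list));
	list_del_init(&obj->list);
	list_add_tail(&obj->list, inactive);
}

int main(void)
{
	struct list_head flushing, gpu_write, inactive;
	struct fake_obj obj;

	INIT_LIST_HEAD(&flushing);
	INIT_LIST_HEAD(&gpu_write);
	INIT_LIST_HEAD(&inactive);
	INIT_LIST_HEAD(&obj.list);
	INIT_LIST_HEAD(&obj.gpu_write_list);

	/* An object with a dirty GPU write domain sits on both lists. */
	list_add_tail(&obj.list, &flushing);
	list_add_tail(&obj.gpu_write_list, &gpu_write);

	/* Reset path: clearing the write domain alone leaves the object
	 * linked on gpu_write_list; unlinking it is the fix here. */
	obj.write_domain = 0;
	list_del_init(&obj.gpu_write_list);

	move_to_inactive(&obj, &inactive);
	puts("moved to inactive without tripping the list assertion");
	return 0;
}

Dropping the list_del_init() line reproduces the assertion, which is what the pre-patch reset path did by clearing write_domain without unlinking the object.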
Chris Wilson, 14 years ago
commit dfaae392f4
3 changed files with 42 additions and 28 deletions
  1. drivers/gpu/drm/i915/i915_drv.c (+1 -15)
  2. drivers/gpu/drm/i915/i915_drv.h (+1 -2)
  3. drivers/gpu/drm/i915/i915_gem.c (+40 -11)

+ 1 - 15
drivers/gpu/drm/i915/i915_drv.c

@@ -395,21 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	i915_gem_reset_flushing_list(dev);
-
-	/* Move everything out of the GPU domains to ensure we do any
-	 * necessary invalidation upon reuse.
-	 */
-	i915_gem_reset_inactive_gpu_domains(dev);
+	i915_gem_reset_lists(dev);
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as

+ 1 - 2
drivers/gpu/drm/i915/i915_drv.h

@@ -1005,8 +1005,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_reset_flushing_list(struct drm_device *dev);
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
+void i915_gem_reset_lists(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
 			 struct drm_file *file_priv,

+ 40 - 11
drivers/gpu/drm/i915/i915_gem.c

@@ -1682,27 +1682,60 @@ i915_get_gem_seqno(struct drm_device *dev,
 	return ring->get_gem_seqno(dev, ring);
 }
 
-void i915_gem_reset_flushing_list(struct drm_device *dev)
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
 
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		list_del(&request->client_list);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 
 		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
 		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
 }
 
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
+void i915_gem_reset_lists(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
 
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
 	list_for_each_entry(obj_priv,
 			    &dev_priv->mm.inactive_list,
 			    list)
@@ -1720,15 +1753,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
-	bool wedged;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev, ring);
-	wedged = atomic_read(&dev_priv->mm.wedged);
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1736,7 +1766,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
 		trace_i915_gem_request_retire(dev, request->seqno);
@@ -1757,8 +1787,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 					    struct drm_i915_gem_object,
 					    list);
 
-		if (!wedged &&
-		    !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
 
 		obj = &obj_priv->base;
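The dropped wedged handling above existed so that, after a hang, retirement would treat every outstanding seqno as complete and so drain the lists; with i915_gem_reset_lists() emptying the request, active and flushing lists directly, retirement only ever needs the real comparison. For reference, a wraparound-safe seqno test in the spirit of i915_seqno_passed() (a sketch; the helper's exact definition is not part of this diff):

#include <stdint.h>
#include <stdio.h>

/* True when the hardware seqno is at or past the request's seqno,
 * correct across 32-bit wraparound thanks to the signed difference. */
static int seqno_passed(uint32_t hw_seqno, uint32_t req_seqno)
{
	return (int32_t)(hw_seqno - req_seqno) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(100, 90));        /* 1: already retired */
	printf("%d\n", seqno_passed(90, 100));        /* 0: still pending */
	printf("%d\n", seqno_passed(5, 0xfffffff0u)); /* 1: counter wrapped */
	return 0;
}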