@@ -407,7 +407,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
 	struct scatterlist *sg;
@@ -469,7 +468,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if (ret == 0)
 			goto next_page;
 
-		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -502,12 +500,6 @@ next_page:
 out:
 	i915_gem_object_unpin_pages(obj);
 
-	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-	}
-
 	return ret;
 }
 
@@ -838,11 +830,8 @@ out:
 	i915_gem_object_unpin_pages(obj);
 
 	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-		/* and flush dirty cachelines in case the object isn't in the cpu write
-		 * domain anymore. */
+		/* Fixup: Flush dirty cachelines in case the object isn't in the
+		 * cpu write domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
 			i915_gem_chipset_flush(dev);