@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return !obj->active;
+	return obj->gtt_space && !obj->active;
 }
 
 int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
	pinned = 0;
	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);
@@ -423,9 +425,11 @@ i915_gem_shmem_pread(struct drm_device *dev,
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+			if (ret)
+				return ret;
+		}
	}
 
	offset = args->offset;
@@ -751,9 +755,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, true);
+			if (ret)
+				return ret;
+		}
	}
	/* Same trick applies for invalidate partially written cachelines before
	 * writing. */
@@ -1366,17 +1372,28 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
+static int
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
+	int ret, i;
 
-	if (!obj->pages)
-		return;
+	if (obj->pages == NULL)
+		return 0;
 
+	BUG_ON(obj->gtt_space);
	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -1396,37 +1413,112 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
	drm_free_large(obj->pages);
	obj->pages = NULL;
+
+	list_del(&obj->gtt_list);
+
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+	return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	struct drm_i915_gem_object *obj, *next;
+	long count = 0;
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.unbound_list,
+				 gtt_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_put_pages_gtt(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_unbind(obj) == 0 &&
+		    i915_gem_object_put_pages_gtt(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	i915_gem_evict_everything(dev_priv->dev);
+
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		i915_gem_object_put_pages_gtt(obj);
 }
 
 int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-			      gfp_t gfpmask)
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
-	struct inode *inode;
	struct page *page;
+	gfp_t gfp;
 
	if (obj->pages || obj->sg_table)
		return 0;
 
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
-	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;
 
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	gfpmask |= mapping_gfp_mask(mapping);
-
+	/* Fail silently without starting the shrinker */
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	gfp = mapping_gfp_mask(mapping);
+	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+	gfp &= ~(__GFP_IO | __GFP_WAIT);
	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_purge(dev_priv, page_count);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffer, now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+			gfp |= __GFP_IO | __GFP_WAIT;
+
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page))
+				goto err_pages;
+
+			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+			gfp &= ~(__GFP_IO | __GFP_WAIT);
+		}
 
		obj->pages[i] = page;
	}
@@ -1434,6 +1526,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);
 
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	return 0;
 
 err_pages:
@@ -1698,6 +1791,7 @@ void i915_gem_reset(struct drm_device *dev)
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}
 
+
	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
 }
@@ -2209,22 +2303,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
	i915_gem_object_finish_gtt(obj);
 
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it.
-	 */
-	if (ret == 0)
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret == -ERESTARTSYS)
-		return ret;
-	if (ret) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
-
	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
@@ -2240,10 +2318,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
	}
	i915_gem_gtt_finish_object(obj);
 
-	i915_gem_object_put_pages_gtt(obj);
-
-	list_del_init(&obj->gtt_list);
-	list_del_init(&obj->mm_list);
+	list_del(&obj->mm_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;
 
@@ -2251,10 +2327,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;
 
-	if (i915_gem_object_is_purgeable(obj))
-		i915_gem_object_truncate(obj);
-
-	return ret;
+	return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
@@ -2667,7 +2740,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
-	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;
@@ -2707,6 +2779,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
		return -E2BIG;
	}
 
+	ret = i915_gem_object_get_pages_gtt(obj);
+	if (ret)
+		return ret;
+
 search_free:
	if (map_and_fenceable)
		free_space =
@@ -2733,9 +2809,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						 false);
	}
	if (obj->gtt_space == NULL) {
-		/* If the gtt is empty and we're still having trouble
-		 * fitting our object in, we're out of memory.
-		 */
		ret = i915_gem_evict_something(dev, size, alignment,
					       obj->cache_level,
					       map_and_fenceable);
@@ -2752,55 +2825,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
		return -EINVAL;
	}
 
-	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
-	if (ret) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
-
-		if (ret == -ENOMEM) {
-			/* first try to reclaim some memory by clearing the GTT */
-			ret = i915_gem_evict_everything(dev, false);
-			if (ret) {
-				/* now try to shrink everyone else */
-				if (gfpmask) {
-					gfpmask = 0;
-					goto search_free;
-				}
-
-				return -ENOMEM;
-			}
-
-			goto search_free;
-		}
-
-		return ret;
-	}
 
	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
-		i915_gem_object_put_pages_gtt(obj);
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;
-
-		if (i915_gem_evict_everything(dev, false))
-			return ret;
-
-		goto search_free;
+		return ret;
	}
 
	if (!dev_priv->mm.aliasing_ppgtt)
		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	/* Assert that the object is not currently in any GPU domain. As it
-	 * wasn't in the GTT, there shouldn't be any way it could have been in
-	 * a GPU cache
-	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
	obj->gtt_offset = obj->gtt_space->start;
 
	fenceable =
@@ -3464,9 +3502,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;
 
-	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) &&
-	    obj->gtt_space == NULL)
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);
 
	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3573,6 +3610,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
		dev_priv->mm.interruptible = was_interruptible;
	}
 
+	i915_gem_object_put_pages_gtt(obj);
	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);
 
@@ -3605,7 +3643,7 @@ i915_gem_idle(struct drm_device *dev)
 
	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev, false);
+		i915_gem_evict_everything(dev);
 
	i915_gem_reset_fences(dev);
 
@@ -3963,8 +4001,9 @@ i915_gem_load(struct drm_device *dev)
 
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4208,13 +4247,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
	spin_unlock(&file_priv->mm.lock);
 }
 
-static int
-i915_gpu_is_active(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return !list_empty(&dev_priv->mm.active_list);
-}
-
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -4223,60 +4255,26 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj, *next;
+	struct drm_i915_gem_object *obj;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;
 
	if (!mutex_trylock(&dev->struct_mutex))
		return 0;
 
-	/* "fast-path" to count number of available objects */
-	if (nr_to_scan == 0) {
-		cnt = 0;
-		list_for_each_entry(obj,
-				    &dev_priv->mm.inactive_list,
-				    mm_list)
-			cnt++;
-		mutex_unlock(&dev->struct_mutex);
-		return cnt / 100 * sysctl_vfs_cache_pressure;
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
	}
 
-rescan:
-	/* first scan for clean buffers */
-	i915_gem_retire_requests(dev);
-
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj)) {
-			if (i915_gem_object_unbind(obj) == 0 &&
-			    --nr_to_scan == 0)
-				break;
-		}
-	}
-
-	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (nr_to_scan &&
-		    i915_gem_object_unbind(obj) == 0)
-			nr_to_scan--;
-		else
-			cnt++;
-	}
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
 
-	if (nr_to_scan && i915_gpu_is_active(dev)) {
-		/*
-		 * We are desperate for pages, so as a last resort, wait
-		 * for the GPU to finish and discard whatever we can.
-		 * This has a dramatic impact to reduce the number of
-		 * OOM-killer events whilst running the GPU aggressively.
-		 */
-		if (i915_gpu_idle(dev) == 0)
-			goto rescan;
-	}
	mutex_unlock(&dev->struct_mutex);
-	return cnt / 100 * sysctl_vfs_cache_pressure;
+	return cnt;
 }