@@ -1692,6 +1692,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
 	struct drm_i915_gem_object *obj, *next;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	long count = 0;
 
 	list_for_each_entry_safe(obj, next,
@@ -1705,9 +1706,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		}
 	}
 
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
+	list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_unbind(obj) == 0 &&
 		    i915_gem_object_put_pages(obj) == 0) {
@@ -1878,6 +1877,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
@@ -1890,7 +1890,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->mm_list, &vm->active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
@@ -1914,11 +1914,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
 
-	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->mm_list, &vm->inactive_list);
 
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
@@ -2270,6 +2271,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct intel_ring_buffer *ring;
 	int i;
@@ -2280,12 +2282,8 @@ void i915_gem_reset(struct drm_device *dev)
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
-	list_for_each_entry(obj,
-			    &dev_priv->mm.inactive_list,
-			    mm_list)
-	{
+	list_for_each_entry(obj, &vm->inactive_list, mm_list)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-	}
 
 	/* The fence registers are invalidated so clear them out */
 	i915_gem_reset_fences(dev);
@@ -3076,6 +3074,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?
@@ -3151,7 +3150,7 @@ search_free:
 	}
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj->mm_list, &vm->inactive_list);
 
 	fenceable =
 		i915_gem_obj_ggtt_size(obj) == fence_size &&
@@ -3299,7 +3298,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj->mm_list,
+			       &dev_priv->gtt.base.inactive_list);
 
 	return 0;
 }
@@ -4242,7 +4242,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4320,8 +4320,8 @@ i915_gem_load(struct drm_device *dev)
 				  SLAB_HWCACHE_ALIGN,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+	INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4591,6 +4591,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	bool unlock = true;
@@ -4619,7 +4620,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+	list_for_each_entry(obj, &vm->inactive_list, global_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
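
Taken together, the hunks above are a mechanical relocation: the active/inactive LRU list heads move from dev_priv->mm into struct i915_address_space (reached here through dev_priv->gtt.base), while each object keeps linking itself in through its existing mm_list node. The snippet below is a minimal, userspace-compilable sketch of that ownership pattern; the list helpers are hand-rolled stand-ins for <linux/list.h> and both structs are trimmed-down hypothetical versions, not the kernel's real definitions.

/* Hypothetical sketch: per-VM active/inactive lists with an intrusive node.
 * Nothing here is the kernel's implementation; it only mirrors the pattern
 * the patch repoints (vm->active_list / vm->inactive_list, obj->mm_list).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* After the patch, the address space owns the LRU lists. */
struct i915_address_space {
	struct list_head active_list;
	struct list_head inactive_list;
};

struct drm_i915_gem_object {
	int id;
	struct list_head mm_list;	/* links the object into one VM list */
};

int main(void)
{
	struct i915_address_space vm;
	struct drm_i915_gem_object a = { .id = 1 }, b = { .id = 2 };
	struct list_head *pos;

	INIT_LIST_HEAD(&vm.active_list);
	INIT_LIST_HEAD(&vm.inactive_list);

	/* Binding: objects start out on the VM's inactive list. */
	list_add_tail(&a.mm_list, &vm.inactive_list);
	list_add_tail(&b.mm_list, &vm.inactive_list);

	/* "move_to_active": repoint the object at vm->active_list. */
	list_move_tail(&a.mm_list, &vm.active_list);

	for (pos = vm.inactive_list.next; pos != &vm.inactive_list; pos = pos->next)
		printf("inactive: obj %d\n",
		       container_of(pos, struct drm_i915_gem_object, mm_list)->id);
	return 0;
}

Built with a plain cc invocation, the sketch prints the one object still on the inactive list after the other has been "moved to active", which is the same bookkeeping the patch moves onto the per-VM lists.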