@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
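The rewritten i915_gem_object_get_pages_or_evict() above now states its allocation policy directly in the mask it passes, instead of saving and restoring state on the shared mapping. For reference, the two passes boil down to the following (annotation only, not patch text; flag semantics as defined in include/linux/gfp.h):

	/* Pass 1: fail fast. __GFP_NORETRY makes the page allocator give
	 * up rather than retry indefinitely (and rather than trigger the
	 * OOM killer); __GFP_NOWARN suppresses the allocation-failure
	 * warning, since -ENOMEM is expected and handled here.
	 */
	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* Pass 2, only after evicting an old buffer: no extra flags, so
	 * the read falls back to the mapping's default mask and is
	 * allowed to block in reclaim.
	 */
	ret = i915_gem_object_get_pages(obj, 0);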
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
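This hunk is the crux of the change: rather than temporarily rewriting the mapping-wide gfp mask around the read (the pattern the deleted i915_gem_object_get_page_gfp_mask()/i915_gem_object_set_page_gfp_mask() helpers existed to support), the caller's mask is now passed per call via read_cache_page_gfp(). A side-by-side sketch of the two approaches (annotation only, not patch text):

	/* Before: mutate shared state around the call. Any concurrent
	 * user of the same mapping sees the modified mask, and an error
	 * path that skips the restore leaks __GFP_NORETRY into every
	 * later allocation against this mapping.
	 */
	gfp = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, gfp & ~__GFP_NORETRY);
	page = read_mapping_page(mapping, i, NULL);
	mapping_set_gfp_mask(mapping, gfp);

	/* After: the mask is an argument to this one read; no global
	 * state is touched. __GFP_COLD merely hints that the new pages
	 * are cache-cold.
	 */
	page = read_cache_page_gfp(mapping, i,
				   mapping_gfp_mask(mapping) | __GFP_COLD | gfpmask);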
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
-				goto search_free;
+			if (gfpmask) {
+				gfpmask = 0;
+				goto search_free;
 			}
 
 			return ret;
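In i915_gem_object_bind_to_gtt(), gfpmask also takes over the job of the deleted retry_alloc flag: a non-zero mask marks the optimistic first pass, and once eviction can free nothing more the mask is cleared and the search restarts with blocking allocations permitted. Condensed control flow after this patch (paraphrase, not patch text):

	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;	/* optimistic pass */

 search_free:
	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret == -ENOMEM) {
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret == 0)
			goto search_free;	/* space freed, try again */
		if (gfpmask) {
			gfpmask = 0;		/* final pass: may block */
			goto search_free;
		}
		return ret;			/* both passes exhausted */
	}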
@@ -4946,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -5004,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;