@@ -343,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                                       page_length);
         kunmap_atomic(vaddr);
 
-        return ret;
+        return ret ? -EFAULT : 0;
 }
 
 static void
@@ -394,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                              page_length);
         kunmap(page);
 
-        return ret;
+        return ret ? - EFAULT : 0;
 }
 
 static int
@@ -403,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                      struct drm_i915_gem_pread *args,
                      struct drm_file *file)
 {
-        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
         char __user *user_data;
         ssize_t remain;
         loff_t offset;
@@ -412,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
         int hit_slowpath = 0;
         int prefaulted = 0;
         int needs_clflush = 0;
-        int release_page;
 
         user_data = (char __user *) (uintptr_t) args->data_ptr;
         remain = args->size;
@@ -433,6 +431,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 }
         }
 
+        ret = i915_gem_object_get_pages(obj);
+        if (ret)
+                return ret;
+
+        i915_gem_object_pin_pages(obj);
+
         offset = args->offset;
 
         while (remain > 0) {
@@ -448,18 +452,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - shmem_page_offset;
 
-                if (obj->pages) {
-                        page = obj->pages[offset >> PAGE_SHIFT];
-                        release_page = 0;
-                } else {
-                        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-                        if (IS_ERR(page)) {
-                                ret = PTR_ERR(page);
-                                goto out;
-                        }
-                        release_page = 1;
-                }
-
+                page = obj->pages[offset >> PAGE_SHIFT];
                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                         (page_to_phys(page) & (1 << 17)) != 0;
 
@@ -470,7 +463,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                         goto next_page;
 
                 hit_slowpath = 1;
-                page_cache_get(page);
                 mutex_unlock(&dev->struct_mutex);
 
                 if (!prefaulted) {
@@ -488,16 +480,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
                                        needs_clflush);
 
                 mutex_lock(&dev->struct_mutex);
-                page_cache_release(page);
+
 next_page:
                 mark_page_accessed(page);
-                if (release_page)
-                        page_cache_release(page);
 
-                if (ret) {
-                        ret = -EFAULT;
+                if (ret)
                         goto out;
-                }
 
                 remain -= page_length;
                 user_data += page_length;
@@ -505,6 +493,8 @@ next_page:
         }
 
 out:
+        i915_gem_object_unpin_pages(obj);
+
         if (hit_slowpath) {
                 /* Fixup: Kill any reinstated backing storage pages */
                 if (obj->madv == __I915_MADV_PURGED)