@@ -891,15 +891,20 @@ void do_generic_mapping_read(struct address_space *mapping,
 		unsigned long nr, ret;
 
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
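
The change above replaces the old pair of calls, page_cache_readahead() before the
lookup and handle_ra_miss() on a miss, with a single entry point that is invoked in
exactly two cases: the page is absent from the page cache, or the cached page carries
the PG_readahead mark. Pulled out of do_generic_mapping_read() into a free-standing
helper, the calling convention looks roughly like the sketch below. This is only an
illustration; get_page_readahead() is a made-up name, and the real patch keeps the
calls inline as shown in the hunk.

static struct page *get_page_readahead(struct address_space *mapping,
				       struct file_ra_state *ra,
				       struct file *filp, pgoff_t index,
				       unsigned long req_size)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* Cache miss: start readahead, then look the page up again. */
		page_cache_readahead_ondemand(mapping, ra, filp, NULL,
						index, req_size);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		/* PG_readahead hit: submit the next readahead window early. */
		page_cache_readahead_ondemand(mapping, ra, filp, page,
						index, req_size);
	}
	return page;	/* may still be NULL if readahead brought nothing in */
}
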
@@ -1051,6 +1056,7 @@ no_cached_page:
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
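
This hunk writes the position of the last page read back into the readahead state
that the caller passed in (normally file->f_ra), next to the rest of the copied-back
ra state. Purely as an illustration of why that position is worth remembering, a
sequentiality test built on such a field could look like the helper below; the actual
heuristics live inside page_cache_readahead_ondemand() and are not part of this patch.

/*
 * Illustration only, not kernel code: a request that starts at the
 * remembered position, or one page past it, continues the previous
 * read and can be treated as sequential.
 */
static int access_is_sequential(struct file_ra_state *ra, pgoff_t index)
{
	return index == ra->prev_index || index == ra->prev_index + 1;
}
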
@@ -1332,27 +1338,31 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (VM_RandomReadHint(vma))
 		goto no_cached_page;
 
-	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(vma))
-		page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);
-
 	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
 	page = find_lock_page(mapping, vmf->pgoff);
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							   vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							   vmf->pgoff, 1);
+		}
+	}
+
 	if (!page) {
 		unsigned long ra_pages;
 
-		if (VM_SequentialReadHint(vma)) {
-			handle_ra_miss(mapping, ra, vmf->pgoff);
-			goto no_cached_page;
-		}
 		ra->mmap_miss++;
 
 		/*
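
In the fault path the same two triggers apply (cache miss and PG_readahead), but only
for mappings carrying the sequential-read hint; mappings with the random-read hint
still skip readahead entirely via the earlier goto. The hint is normally set from
userspace with madvise(). A minimal usage example follows; the file name is made up
and error handling is trimmed for brevity.

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("data.bin", O_RDONLY);	/* hypothetical input file */
	long sum = 0;

	fstat(fd, &st);
	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);

	/*
	 * MADV_SEQUENTIAL sets the per-vma hint that
	 * VM_SequentialReadHint() tests in the hunk above, so page
	 * faults on this mapping go through the readahead calls.
	 */
	madvise(p, st.st_size, MADV_SEQUENTIAL);

	for (off_t i = 0; i < st.st_size; i++)
		sum += p[i];

	munmap(p, st.st_size);
	close(fd);
	return sum == 0;
}
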
@@ -1405,6 +1415,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	ra->prev_index = page->index;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 