@@ -379,6 +379,29 @@ ondemand_readahead(struct address_space *mapping,
 						offset, req_size, 0);
 	}
 
+	/*
+	 * Hit a marked page without valid readahead state.
+	 * E.g. interleaved reads.
+	 * Query the pagecache for async_size, which normally equals to
+	 * readahead size. Ramp it up and use it as the new readahead size.
+	 */
+	if (hit_readahead_marker) {
+		pgoff_t start;
+
+		read_lock_irq(&mapping->tree_lock);
+		start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
+		read_unlock_irq(&mapping->tree_lock);
+
+		if (!start || start - offset > max)
+			return 0;
+
+		ra->start = start;
+		ra->size = start - offset;	/* old async_size */
+		ra->size = get_next_ra_size(ra, max);
+		ra->async_size = ra->size;
+		goto readit;
+	}
+
 	/*
 	 * It may be one of
 	 *	- first read on start of file
@@ -390,16 +413,6 @@ ondemand_readahead(struct address_space *mapping,
 	ra->size = get_init_ra_size(req_size, max);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
-	/*
-	 * Hit on a marked page without valid readahead state.
-	 * E.g. interleaved reads.
-	 * Not knowing its readahead pos/size, bet on the minimal possible one.
-	 */
-	if (hit_readahead_marker) {
-		ra->start++;
-		ra->size = get_next_ra_size(ra, max);
-	}
-
 readit:
 	return ra_submit(ra, mapping, filp);
 }
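
For readers outside the kernel tree, here is a minimal user-space sketch of the recovery logic in the hit_readahead_marker branch above. It assumes a toy page cache modelled as a boolean array; next_hole(), ramp_up() and struct ra_state are hypothetical stand-ins for radix_tree_next_hole(), get_next_ra_size() and struct file_ra_state, and the doubling policy in ramp_up() only approximates the real ramp-up. Only the control flow mirrors the patch.

/*
 * Sketch only: a toy page cache plus the marker-hit recovery path.
 * next_hole(), ramp_up() and struct ra_state are made-up names that
 * stand in for radix_tree_next_hole(), get_next_ra_size() and
 * struct file_ra_state.
 */
#include <stdio.h>
#include <stdbool.h>

#define CACHE_PAGES 64

struct ra_state {
	unsigned long start;		/* first page of the readahead window */
	unsigned long size;		/* number of pages to read ahead      */
	unsigned long async_size;	/* marker distance from window end    */
};

/* Toy page cache: cached[i] == true means page i is resident. */
static bool cached[CACHE_PAGES];

/*
 * Return the index of the first non-resident page in [offset, offset+max],
 * or 0 if none is found in that range (the patch's "!start" check treats
 * 0 the same way).
 */
static unsigned long next_hole(unsigned long offset, unsigned long max)
{
	for (unsigned long i = offset; i <= offset + max && i < CACHE_PAGES; i++)
		if (!cached[i])
			return i;
	return 0;
}

/*
 * Grow the window, assuming a simple doubling policy capped at max; the
 * real get_next_ra_size() ramps up more aggressively for small windows.
 */
static unsigned long ramp_up(unsigned long cur, unsigned long max)
{
	unsigned long next = cur * 2;

	return next < max ? next : max;
}

/*
 * Mirror of the hit_readahead_marker branch: reconstruct a plausible
 * readahead state purely from what is already in the page cache.
 */
static int on_marker_hit(struct ra_state *ra, unsigned long offset,
			 unsigned long max)
{
	unsigned long start = next_hole(offset, max);

	if (!start || start - offset > max)
		return 0;			/* give up, read just this page */

	ra->start = start;
	ra->size = start - offset;		/* old async_size */
	ra->size = ramp_up(ra->size, max);
	ra->async_size = ra->size;
	return 1;				/* submit ra->start .. +ra->size */
}

int main(void)
{
	struct ra_state ra = { 0, 0, 0 };

	/* Simulate another reader having populated pages 10..17. */
	for (unsigned long i = 10; i < 18; i++)
		cached[i] = true;

	/* This reader hits the marker at page 12 with no state of its own. */
	if (on_marker_hit(&ra, 12, 32))
		printf("readahead %lu pages from page %lu\n", ra.size, ra.start);
	return 0;
}

With a second reader having already brought in pages 10..17 and a marker hit at page 12, this prints "readahead 12 pages from page 18": the estimated old async_size is 6 pages, it is ramped up to 12, and the new window starts right after the pages that are already resident.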