@@ -2726,11 +2726,11 @@ void set_page_extent_mapped(struct page *page)
  * handlers)
  * XXX JDM: This needs looking at to ensure proper page locking
  */
-static int __extent_read_full_page(struct extent_io_tree *tree,
-				   struct page *page,
-				   get_extent_t *get_extent,
-				   struct bio **bio, int mirror_num,
-				   unsigned long *bio_flags, int rw)
+static int __do_readpage(struct extent_io_tree *tree,
+			 struct page *page,
+			 get_extent_t *get_extent,
+			 struct bio **bio, int mirror_num,
+			 unsigned long *bio_flags, int rw)
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
@@ -2744,7 +2744,6 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
-	struct btrfs_ordered_extent *ordered;
 	int ret;
 	int nr = 0;
 	size_t pg_offset = 0;
@@ -2755,24 +2754,15 @@ static int __extent_read_full_page(struct extent_io_tree *tree,

 	set_page_extent_mapped(page);

+	end = page_end;
 	if (!PageUptodate(page)) {
 		if (cleancache_get_page(page) == 0) {
 			BUG_ON(blocksize != PAGE_SIZE);
+			unlock_extent(tree, start, end);
 			goto out;
 		}
 	}

-	end = page_end;
-	while (1) {
-		lock_extent(tree, start, end);
-		ordered = btrfs_lookup_ordered_extent(inode, start);
-		if (!ordered)
-			break;
-		unlock_extent(tree, start, end);
-		btrfs_start_ordered_extent(inode, ordered, 1);
-		btrfs_put_ordered_extent(ordered);
-	}
-
 	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
 		char *userpage;
 		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
@@ -2901,6 +2891,101 @@ out:
 	return 0;
 }

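+/*
+ * Lock the range spanned by a batch of pages that are contiguous in
+ * the file, wait for any ordered extents (pending writes) in that
+ * range to finish, then read each page with __do_readpage().
+ */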
+static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+					     struct page *pages[], int nr_pages,
+					     u64 start, u64 end,
+					     get_extent_t *get_extent,
+					     struct bio **bio, int mirror_num,
+					     unsigned long *bio_flags, int rw)
+{
+	struct inode *inode;
+	struct btrfs_ordered_extent *ordered;
+	int index;
+
+	inode = pages[0]->mapping->host;
+	while (1) {
+		lock_extent(tree, start, end);
+		ordered = btrfs_lookup_ordered_range(inode, start,
+						     end - start + 1);
+		if (!ordered)
+			break;
+		unlock_extent(tree, start, end);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+
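+	/* the whole range is now locked and idle, read each page */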
+	for (index = 0; index < nr_pages; index++) {
+		__do_readpage(tree, pages[index], get_extent, bio, mirror_num,
+			      bio_flags, rw);
+		page_cache_release(pages[index]);
+	}
+}
+
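+/*
+ * Split the page array into runs that are contiguous in the file, so
+ * each run is locked and waited on once instead of once per page.
+ */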
+static void __extent_readpages(struct extent_io_tree *tree,
+			       struct page *pages[],
+			       int nr_pages, get_extent_t *get_extent,
+			       struct bio **bio, int mirror_num,
+			       unsigned long *bio_flags, int rw)
+{
+	u64 start;
+	u64 end = 0;
+	u64 page_start;
+	int index;
+	int first_index;
+
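+	/* grow the current run while the next page is file-contiguous */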
+	for (index = 0; index < nr_pages; index++) {
+		page_start = page_offset(pages[index]);
+		if (!end) {
+			start = page_start;
+			end = start + PAGE_CACHE_SIZE - 1;
+			first_index = index;
+		} else if (end + 1 == page_start) {
+			end += PAGE_CACHE_SIZE;
+		} else {
+			__do_contiguous_readpages(tree, &pages[first_index],
+						  index - first_index, start,
+						  end, get_extent, bio,
+						  mirror_num, bio_flags, rw);
+			start = page_start;
+			end = start + PAGE_CACHE_SIZE - 1;
+			first_index = index;
+		}
+	}
+
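+	/* submit whatever run is still pending */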
+	if (end)
+		__do_contiguous_readpages(tree, &pages[first_index],
+					  index - first_index, start,
+					  end, get_extent, bio,
+					  mirror_num, bio_flags, rw);
+}
+
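+/*
+ * Single-page read path: take the extent lock once any ordered
+ * extents covering the page have finished, then hand off to
+ * __do_readpage().
+ */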
+static int __extent_read_full_page(struct extent_io_tree *tree,
+				   struct page *page,
+				   get_extent_t *get_extent,
+				   struct bio **bio, int mirror_num,
+				   unsigned long *bio_flags, int rw)
+{
+	struct inode *inode = page->mapping->host;
+	struct btrfs_ordered_extent *ordered;
+	u64 start = page_offset(page);
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	int ret;
+
+	while (1) {
+		lock_extent(tree, start, end);
+		ordered = btrfs_lookup_ordered_extent(inode, start);
+		if (!ordered)
+			break;
+		unlock_extent(tree, start, end);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+
+	ret = __do_readpage(tree, page, get_extent, bio, mirror_num, bio_flags,
+			    rw);
+	return ret;
+}
+
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent, int mirror_num)
 {
@@ -3751,7 +3836,6 @@ int extent_readpages(struct extent_io_tree *tree,
 	unsigned long bio_flags = 0;
 	struct page *pagepool[16];
 	struct page *page;
-	int i = 0;
 	int nr = 0;

 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
@@ -3768,18 +3852,13 @@ int extent_readpages(struct extent_io_tree *tree,
 		pagepool[nr++] = page;
 		if (nr < ARRAY_SIZE(pagepool))
 			continue;
-		for (i = 0; i < nr; i++) {
-			__extent_read_full_page(tree, pagepool[i], get_extent,
-					&bio, 0, &bio_flags, READ);
-			page_cache_release(pagepool[i]);
-		}
+		__extent_readpages(tree, pagepool, nr, get_extent,
+				   &bio, 0, &bio_flags, READ);
 		nr = 0;
 	}
-	for (i = 0; i < nr; i++) {
-		__extent_read_full_page(tree, pagepool[i], get_extent,
-				&bio, 0, &bio_flags, READ);
-		page_cache_release(pagepool[i]);
-	}
+	if (nr)
+		__extent_readpages(tree, pagepool, nr, get_extent,
+				   &bio, 0, &bio_flags, READ);

 	BUG_ON(!list_empty(pages));
 	if (bio)