@@ -256,7 +256,7 @@ out_unlock:
  *    the page lock and the glock) and return having done no I/O. Its
  *    obviously not something we'd want to do on too regular a basis.
  *    Any I/O we ignore at this time will be done via readpage later.
- * 2. We have to handle stuffed files here too.
+ * 2. We don't handle stuffed files here; we let readpage do the honours.
  * 3. mpage_readpages() does most of the heavy lifting in the common case.
  * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
  * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +269,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_holder gh;
-	unsigned page_idx;
-	int ret;
+	int ret = 0;
 	int do_unlock = 0;
 
 	if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +288,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 		goto out_unlock;
 	}
 skip_lock:
-	if (gfs2_is_stuffed(ip)) {
-		struct pagevec lru_pvec;
-		pagevec_init(&lru_pvec, 0);
-		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-			struct page *page = list_entry(pages->prev, struct page, lru);
-			prefetchw(&page->flags);
-			list_del(&page->lru);
-			if (!add_to_page_cache(page, mapping,
-					       page->index, GFP_KERNEL)) {
-				ret = stuffed_readpage(ip, page);
-				unlock_page(page);
-				if (!pagevec_add(&lru_pvec, page))
-					__pagevec_lru_add(&lru_pvec);
-			} else {
-				page_cache_release(page);
-			}
-		}
-		pagevec_lru_add(&lru_pvec);
-		ret = 0;
-	} else {
-		/* What we really want to do .... */
+	if (!gfs2_is_stuffed(ip))
 		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
-	}
 
 	if (do_unlock) {
 		gfs2_glock_dq_m(1, &gh);