@@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
+	mutex_init(&eb->mutex);
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
@@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+		lock_page(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
 		else
@@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			end = start + PAGE_CACHE_SIZE - 1;
 			if (test_range_bit(tree, start, end,
 					   EXTENT_DIRTY, 0)) {
+				unlock_page(page);
 				continue;
 			}
 		}
@@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 					  PAGECACHE_TAG_DIRTY);
 		}
 		read_unlock_irq(&page->mapping->tree_lock);
+		unlock_page(page);
 	}
 	return 0;
 }
@@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * on us if the page isn't already dirty.
 		 */
 		if (i == 0) {
+			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
+			lock_page(page);
 			set_page_extent_mapped(page);
+			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+		if (i == 0)
+			unlock_page(page);
 	}
 	return set_extent_dirty(tree, eb->start,
 				eb->start + eb->len - 1, GFP_NOFS);