
Btrfs: Use a mutex in the extent buffer for tree block locking

This replaces the use of the page cache lock bit for locking, which wasn't
suitable for block size < page size and couldn't be used recursively.

The mutexes alone don't fix either problem, but they are the first step.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason, 17 years ago
commit a61e6f29dc
4 changed files with 17 additions and 13 deletions
  1. fs/btrfs/extent-tree.c: 1 addition, 6 deletions
  2. fs/btrfs/extent_io.c: 9 additions, 0 deletions
  3. fs/btrfs/extent_io.h: 1 addition, 0 deletions
  4. fs/btrfs/locking.c: 6 additions, 7 deletions

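Before the per-file hunks, the shape of the new lock in isolation. The fs/btrfs/locking.c hunk at the bottom of this page spins on mutex_trylock() for a bounded number of rounds before sleeping in mutex_lock(). A userspace analogue with POSIX mutexes, illustrative only; note that pthread_mutex_trylock() returns 0 on success, the opposite sense of the kernel's mutex_trylock():

#include <pthread.h>
#include <sched.h>

/*
 * Userspace analogue of the adaptive acquire in fs/btrfs/locking.c:
 * poll the lock without blocking for a while, then sleep on it.
 */
static int tree_lock_analogue(pthread_mutex_t *m)
{
	int i;

	if (pthread_mutex_trylock(m) == 0)
		return 0;
	for (i = 0; i < 512; i++) {
		sched_yield();	/* stand-in for the kernel's cpu_relax() */
		if (pthread_mutex_trylock(m) == 0)
			return 0;
	}
	return pthread_mutex_lock(m);	/* block until available */
}

Spinning first pays off when the holder usually releases the lock quickly; the blocking fallback bounds the cost when it does not.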
+ 1 - 6
fs/btrfs/extent-tree.c

@@ -1451,7 +1451,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
 		struct extent_buffer *buf;
 		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
 		if (buf) {
-			if (!btrfs_try_tree_lock(buf) &&
+			if (btrfs_try_tree_lock(buf) &&
 			    btrfs_buffer_uptodate(buf, 0)) {
 				u64 transid =
 				    root->fs_info->running_transaction->transid;
@@ -3345,11 +3345,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		set_state_private(block_group_cache, found_key.objectid,
 				  (unsigned long)cache);
 
-		/* hack for now */
-		if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
-			cache_block_group(root->fs_info->extent_root,
-					  cache);
-		}
 		if (key.objectid >=
 		    btrfs_super_total_bytes(&info->super_copy))
 			break;

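A note on the sign flip in the pin_down_bytes() hunk above: the old btrfs_try_tree_lock() wrapped TestSetPageLocked(), which returns the previous value of PG_locked, so zero meant the lock was just acquired and the call site had to test !btrfs_try_tree_lock(buf). mutex_trylock() returns 1 on success, so the negation goes away. Hypothetical wrappers contrasting the two conventions (not code from the patch; TestSetPageLocked() is the page-flag API of this era):

#include <linux/mm.h>
#include <linux/mutex.h>

/* old: returns the previous PG_locked bit, so 0 == "just acquired" */
static int old_try_lock(struct page *page)
{
	return TestSetPageLocked(page);
}

/* new: returns 1 == "just acquired", 0 == "contended" */
static int new_try_lock(struct mutex *m)
{
	return mutex_trylock(m);
}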
+ 9 - 0
fs/btrfs/extent_io.c

@@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
+	mutex_init(&eb->mutex);
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
@@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+		lock_page(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
 		else
@@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			end  = start + PAGE_CACHE_SIZE - 1;
 			if (test_range_bit(tree, start, end,
 					   EXTENT_DIRTY, 0)) {
+				unlock_page(page);
 				continue;
 			}
 		}
@@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 						PAGECACHE_TAG_DIRTY);
 		}
 		read_unlock_irq(&page->mapping->tree_lock);
+		unlock_page(page);
 	}
 	return 0;
 }
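One detail worth calling out in the clear_extent_buffer_dirty() hunks above: the page lock is now taken at the top of the per-page loop, so every path out of the loop body, including the continue for ranges that are still dirty elsewhere, must drop it before moving on. In outline, where range_still_dirty() is a hypothetical stand-in for the test_range_bit() check:

/* Outline of the loop discipline the hunks above establish. */
static void clear_pages_outline(struct extent_buffer *eb,
				unsigned long num_pages)
{
	struct page *page;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (range_still_dirty(eb, i)) {	/* hypothetical check */
			unlock_page(page);	/* every exit path unlocks */
			continue;
		}
		/* ... clear this page's dirty state ... */
		unlock_page(page);
	}
}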
@@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * on us if the page isn't already dirty.
 		 */
 		if (i == 0) {
+			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
+			lock_page(page);
 			set_page_extent_mapped(page);
+			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+		if (i == 0)
+			unlock_page(page);
 	}
 	return set_extent_dirty(tree, eb->start,
 				eb->start + eb->len - 1, GFP_NOFS);

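The set_extent_buffer_dirty() hunk follows the same rule with one asymmetry: the head page (i == 0) is locked before set_page_extent_head() and held across __set_page_dirty_nobuffers(), while the other pages take the lock only around the page->private update. A condensed restatement of the new per-page flow (a sketch, not code from the patch):

/* Condensed per-page flow after the patch (sketch). */
static void dirty_one_page(struct extent_buffer *eb,
			   struct page *page, unsigned long i)
{
	if (i == 0) {
		lock_page(page);
		set_page_extent_head(page, eb->len);
	} else if (PagePrivate(page) &&
		   page->private != EXTENT_PAGE_PRIVATE) {
		lock_page(page);
		set_page_extent_mapped(page);
		unlock_page(page);
	}
	__set_page_dirty_nobuffers(page);
	if (i == 0)
		unlock_page(page);	/* head page held across the dirty */
}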
+ 1 - 0
fs/btrfs/extent_io.h

@@ -90,6 +90,7 @@ struct extent_buffer {
 	int flags;
 	struct list_head leak_list;
 	struct rb_node rb_node;
+	struct mutex mutex;
 };
 
 struct extent_map_tree;

+ 6 - 7
fs/btrfs/locking.c

@@ -29,32 +29,31 @@ int btrfs_tree_lock(struct extent_buffer *eb)
 {
 	int i;
 
-	if (!TestSetPageLocked(eb->first_page))
+	if (mutex_trylock(&eb->mutex))
 		return 0;
 	for (i = 0; i < 512; i++) {
 		cpu_relax();
-		if (!TestSetPageLocked(eb->first_page))
+		if (mutex_trylock(&eb->mutex))
 			return 0;
 	}
 	cpu_relax();
-	lock_page(eb->first_page);
+	mutex_lock(&eb->mutex);
 	return 0;
 }
 
 int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
-	return TestSetPageLocked(eb->first_page);
+	return mutex_trylock(&eb->mutex);
 }
 
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	WARN_ON(!PageLocked(eb->first_page));
-	unlock_page(eb->first_page);
+	mutex_unlock(&eb->mutex);
 	return 0;
 }
 
 int btrfs_tree_locked(struct extent_buffer *eb)
 {
-	return PageLocked(eb->first_page);
+	return mutex_is_locked(&eb->mutex);
 }
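
For reference, a hedged sketch of a caller using the reworked API (hypothetical function, not from the patch):

static void modify_block(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* spins briefly, then sleeps */
	WARN_ON(!btrfs_tree_locked(eb));
	/* ... modify the tree block ... */
	btrfs_tree_unlock(eb);
}

One caveat: mutex_is_locked() reports only that some task holds the mutex, so btrfs_tree_locked() cannot distinguish the current task from another holder; it is an assertion helper, not an ownership check.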