
Merge branch 'cleanups' of git://repo.or.cz/linux-2.6/btrfs-unstable into inode_numbers

Conflicts:
	fs/btrfs/extent-tree.c
	fs/btrfs/free-space-cache.c
	fs/btrfs/inode.c
	fs/btrfs/tree-log.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason 14 years ago
Parent
Commit
945d8962ce

+ 1 - 1
fs/btrfs/acl.c

@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
 		return 0;
 
 	acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
-	if (IS_ERR(acl) || !acl)
+	if (IS_ERR_OR_NULL(acl))
 		return PTR_ERR(acl);
 
 	clone = posix_acl_clone(acl, GFP_KERNEL);
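The acl.c hunk above folds the open-coded `IS_ERR(acl) || !acl` test into IS_ERR_OR_NULL() from <linux/err.h>. A minimal sketch of the idiom (not btrfs code; get_acl_like() is a hypothetical stand-in for btrfs_get_acl()):

	/* PTR_ERR(NULL) evaluates to 0, so a NULL result is reported as
	 * "success, nothing to do" while an ERR_PTR() value is passed back
	 * as a negative errno.  Needs <linux/err.h> and <linux/posix_acl.h>.
	 */
	static int chmod_like(struct inode *inode)
	{
		struct posix_acl *acl;

		acl = get_acl_like(inode);	/* may return NULL or ERR_PTR() */
		if (IS_ERR_OR_NULL(acl))
			return PTR_ERR(acl);

		/* ... clone and update the ACL here ... */
		posix_acl_release(acl);
		return 0;
	}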

+ 21 - 21
fs/btrfs/compression.c

@@ -333,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	struct compressed_bio *cb;
 	unsigned long bytes_left;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	int page_index = 0;
+	int pg_index = 0;
 	struct page *page;
 	u64 first_byte = disk_start;
 	struct block_device *bdev;
@@ -367,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 	/* create and submit bios for the compressed pages */
 	bytes_left = compressed_len;
-	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
-		page = compressed_pages[page_index];
+	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
 			ret = io_tree->ops->merge_bio_hook(page, 0,
@@ -433,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 				     struct compressed_bio *cb)
 {
 	unsigned long end_index;
-	unsigned long page_index;
+	unsigned long pg_index;
 	u64 last_offset;
 	u64 isize = i_size_read(inode);
 	int ret;
@@ -457,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
 	while (last_offset < compressed_end) {
-		page_index = last_offset >> PAGE_CACHE_SHIFT;
+		pg_index = last_offset >> PAGE_CACHE_SHIFT;
 
-		if (page_index > end_index)
+		if (pg_index > end_index)
 			break;
 
 		rcu_read_lock();
-		page = radix_tree_lookup(&mapping->page_tree, page_index);
+		page = radix_tree_lookup(&mapping->page_tree, pg_index);
 		rcu_read_unlock();
 		if (page) {
 			misses++;
@@ -477,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (!page)
 			break;
 
-		if (add_to_page_cache_lru(page, mapping, page_index,
+		if (add_to_page_cache_lru(page, mapping, pg_index,
 								GFP_NOFS)) {
 			page_cache_release(page);
 			goto next;
@@ -561,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
 	unsigned long compressed_len;
 	unsigned long nr_pages;
-	unsigned long page_index;
+	unsigned long pg_index;
 	struct page *page;
 	struct block_device *bdev;
 	struct bio *comp_bio;
@@ -614,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
-	for (page_index = 0; page_index < nr_pages; page_index++) {
-		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 							      __GFP_HIGHMEM);
-		if (!cb->compressed_pages[page_index])
+		if (!cb->compressed_pages[pg_index])
 			goto fail2;
 	}
 	cb->nr_pages = nr_pages;
@@ -635,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	comp_bio->bi_end_io = end_compressed_bio_read;
 	atomic_inc(&cb->pending_bios);
 
-	for (page_index = 0; page_index < nr_pages; page_index++) {
-		page = cb->compressed_pages[page_index];
+	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_CACHE_SHIFT;
 
@@ -703,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	return 0;
 
 fail2:
-	for (page_index = 0; page_index < nr_pages; page_index++)
-		free_page((unsigned long)cb->compressed_pages[page_index]);
+	for (pg_index = 0; pg_index < nr_pages; pg_index++)
+		free_page((unsigned long)cb->compressed_pages[pg_index]);
 
 	kfree(cb->compressed_pages);
 fail1:
@@ -946,7 +946,7 @@ void btrfs_exit_compress(void)
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 			      unsigned long total_out, u64 disk_start,
 			      struct bio_vec *bvec, int vcnt,
-			      unsigned long *page_index,
+			      unsigned long *pg_index,
 			      unsigned long *pg_offset)
 {
 	unsigned long buf_offset;
@@ -955,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 	unsigned long working_bytes = total_out - buf_start;
 	unsigned long bytes;
 	char *kaddr;
-	struct page *page_out = bvec[*page_index].bv_page;
+	struct page *page_out = bvec[*pg_index].bv_page;
 
 	/*
 	 * start byte is the first byte of the page we're currently
@@ -996,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 		/* check if we need to pick another page */
 		if (*pg_offset == PAGE_CACHE_SIZE) {
-			(*page_index)++;
-			if (*page_index >= vcnt)
+			(*pg_index)++;
+			if (*pg_index >= vcnt)
 				return 0;
 
-			page_out = bvec[*page_index].bv_page;
+			page_out = bvec[*pg_index].bv_page;
 			*pg_offset = 0;
 			start_byte = page_offset(page_out) - disk_start;
 

+ 1 - 1
fs/btrfs/compression.h

@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 			      unsigned long total_out, u64 disk_start,
 			      struct bio_vec *bvec, int vcnt,
-			      unsigned long *page_index,
+			      unsigned long *pg_index,
 			      unsigned long *pg_offset);
 
 int btrfs_submit_compressed_write(struct inode *inode, u64 start,

+ 14 - 15
fs/btrfs/ctree.c

@@ -102,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
 {
 	if (!p)
 		return;
-	btrfs_release_path(NULL, p);
+	btrfs_release_path(p);
 	kmem_cache_free(btrfs_path_cachep, p);
 }
 
@@ -112,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
  *
  * It is safe to call this on paths that no locks or extent buffers held.
  */
-noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+noinline void btrfs_release_path(struct btrfs_path *p)
 {
 	int i;
 
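The two ctree.c hunks above drop the root argument from btrfs_release_path(); the remaining hunks in this file and the call sites in the other files follow suit, since the argument was routinely passed as NULL and the new signature does without it. A rough sketch of the resulting calling convention (lookup_like() is hypothetical and error handling is trimmed):

	static int lookup_like(struct btrfs_root *root, struct btrfs_key *key)
	{
		struct btrfs_path *path = btrfs_alloc_path();
		int ret;

		if (!path)
			return -ENOMEM;
		ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
		/* ... inspect path->nodes[0] / path->slots[0] ... */
		btrfs_release_path(path);	/* was: btrfs_release_path(root, path); */
		btrfs_free_path(path);
		return ret;
	}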
@@ -1323,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
 		ret = -EAGAIN;
 
 		/* release the whole path */
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 		/* read the blocks */
 		if (block1)
@@ -1470,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 				return 0;
 			}
 			free_extent_buffer(tmp);
-			btrfs_release_path(NULL, p);
+			btrfs_release_path(p);
 			return -EIO;
 		}
 	}
@@ -1489,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 	if (p->reada)
 		reada_for_search(root, p, level, slot, key->objectid);
 
-	btrfs_release_path(NULL, p);
+	btrfs_release_path(p);
 
 	ret = -EAGAIN;
 	tmp = read_tree_block(root, blocknr, blocksize, 0);
@@ -1558,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		}
 		b = p->nodes[level];
 		if (!b) {
-			btrfs_release_path(NULL, p);
+			btrfs_release_path(p);
 			goto again;
 		}
 		BUG_ON(btrfs_header_nritems(b) == 1);
@@ -1748,7 +1748,7 @@ done:
 	if (!p->leave_spinning)
 		btrfs_set_path_blocking(p);
 	if (ret < 0)
-		btrfs_release_path(root, p);
+		btrfs_release_path(p);
 	return ret;
 }
 
@@ -3021,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 				    struct btrfs_file_extent_item);
 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 	path->keep_locks = 1;
 	path->search_for_split = 1;
@@ -3641,7 +3641,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
 
 	ret = 0;
 	if (slot == 0) {
-		struct btrfs_disk_key disk_key;
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
 		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
@@ -3943,7 +3942,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
 	else
 		return 1;
 
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		return ret;
@@ -4067,7 +4066,7 @@ find_next_key:
 			sret = btrfs_find_next_key(root, path, min_key, level,
 						  cache_only, min_trans);
 			if (sret == 0) {
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 				goto again;
 			} else {
 				goto out;
@@ -4146,7 +4145,7 @@ next:
 				btrfs_node_key_to_cpu(c, &cur_key, slot);
 
 			orig_lowest = path->lowest_level;
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			path->lowest_level = level;
 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
 						0, 0);
@@ -4223,7 +4222,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 again:
 	level = 1;
 	next = NULL;
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 	path->keep_locks = 1;
 
@@ -4279,7 +4278,7 @@ again:
 			goto again;
 
 		if (ret < 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto done;
 		}
 
@@ -4318,7 +4317,7 @@ again:
 			goto again;
 
 		if (ret < 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto done;
 		}
 

+ 6 - 96
fs/btrfs/ctree.h

@@ -746,12 +746,12 @@ struct btrfs_space_info {
 	 */
 	unsigned long reservation_progress;
 
-	int full:1;		/* indicates that we cannot allocate any more
+	unsigned int full:1;	/* indicates that we cannot allocate any more
 				   chunks for this space */
-	int chunk_alloc:1;	/* set if we are allocating a chunk */
+	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */
 
-	int force_alloc;	/* set if we need to force a chunk alloc for
-				   this space */
+	unsigned int force_alloc;	/* set if we need to force a chunk
+					   alloc for this space */
 
 	struct list_head list;
 
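The btrfs_space_info hunk above also switches the single-bit flags from plain int to unsigned int bitfields. A signed 1-bit field can only represent 0 and -1, so storing 1 typically reads back as -1 on common compilers and a test such as `full == 1` never matches. A small stand-alone illustration (assumes the compiler treats plain int bitfields as signed, which is implementation-defined):

	#include <stdio.h>

	struct flags {
		int signed_full:1;		/* can hold only 0 or -1 */
		unsigned int unsigned_full:1;	/* holds 0 or 1 */
	};

	int main(void)
	{
		struct flags f = { .signed_full = 1, .unsigned_full = 1 };

		/* With gcc this prints "-1 1": the signed field reads back as -1. */
		printf("%d %u\n", f.signed_full, f.unsigned_full);
		return 0;
	}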
@@ -1463,26 +1463,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
 	return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
 }
 
-static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
-					     struct btrfs_chunk *c, int nr,
-					     u64 val)
-{
-	btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
-}
-
 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
 					 struct btrfs_chunk *c, int nr)
 {
 	return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
 }
 
-static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
-					     struct btrfs_chunk *c, int nr,
-					     u64 val)
-{
-	btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
-}
-
 /* struct btrfs_block_group_item */
 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
 			 used, 64);
@@ -1540,14 +1526,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
 	return (struct btrfs_timespec *)ptr;
 }
 
-static inline struct btrfs_timespec *
-btrfs_inode_otime(struct btrfs_inode_item *inode_item)
-{
-	unsigned long ptr = (unsigned long)inode_item;
-	ptr += offsetof(struct btrfs_inode_item, otime);
-	return (struct btrfs_timespec *)ptr;
-}
-
 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 
@@ -1898,33 +1876,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
 	return (u8 *)ptr;
 }
 
-static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
-{
-	unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
-	return (u8 *)ptr;
-}
-
-static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
-{
-	unsigned long ptr = offsetof(struct btrfs_header, csum);
-	return (u8 *)ptr;
-}
-
-static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
-{
-	return NULL;
-}
-
-static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
-{
-	return NULL;
-}
-
-static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
-{
-	return NULL;
-}
-
 static inline int btrfs_is_leaf(struct extent_buffer *eb)
 {
 	return btrfs_header_level(eb) == 0;
@@ -2078,22 +2029,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
 	return sb->s_fs_info;
 }
 
-static inline int btrfs_set_root_name(struct btrfs_root *root,
-				      const char *name, int len)
-{
-	/* if we already have a name just free it */
-	kfree(root->name);
-
-	root->name = kmalloc(len+1, GFP_KERNEL);
-	if (!root->name)
-		return -ENOMEM;
-
-	memcpy(root->name, name, len);
-	root->name[len] = '\0';
-
-	return 0;
-}
-
 static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
 {
 	if (level == 0)
@@ -2138,12 +2073,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     u64 num_bytes, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
 		     u64 bytenr, u64 num, int reserved);
-int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root, struct extent_buffer *leaf);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
 struct btrfs_block_group_cache *btrfs_lookup_block_group(
 						 struct btrfs_fs_info *info,
 						 u64 bytenr);
@@ -2320,7 +2252,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct extent_buffer *parent,
 		       int start_slot, int cache_only, u64 *last_ret,
 		       struct btrfs_key *progress);
-void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
+void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_set_path_blocking(struct btrfs_path *p);
@@ -2343,11 +2275,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
 			   u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, void *data, u32 data_size);
-int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root,
-			    struct btrfs_path *path,
-			    struct btrfs_key *cpu_key, u32 *data_size,
-			    int nr);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_path *path,
@@ -2393,8 +2320,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *item);
 int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
 			 btrfs_root_item *item, struct btrfs_key *key);
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
-		      u64 *found_objectid);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 int btrfs_set_root_node(struct btrfs_root_item *item,
@@ -2493,15 +2418,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_ordered_sum *sums);
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		       struct bio *bio, u64 file_start, int contig);
-int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
-			  u64 start, unsigned long len);
 struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 					  struct btrfs_root *root,
 					  struct btrfs_path *path,
 					  u64 bytenr, int cow);
-int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root, struct btrfs_path *path,
-			u64 isize);
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
 			     u64 end, struct list_head *list);
 /* inode.c */
@@ -2532,8 +2452,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       u32 min_type);
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
-				   int sync);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state);
 int btrfs_writepages(struct address_space *mapping,
@@ -2550,7 +2468,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
-void btrfs_put_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 void btrfs_dirty_inode(struct inode *inode);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
@@ -2561,10 +2478,8 @@ void btrfs_destroy_cachep(void);
 long btrfs_ioctl_trans_end(struct file *file);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
 			 struct btrfs_root *root, int *was_new);
-int btrfs_commit_write(struct file *file, struct page *page,
-		       unsigned from, unsigned to);
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
-				    size_t page_offset, u64 start, u64 end,
+				    size_t pg_offset, u64 start, u64 end,
 				    int create);
 int btrfs_update_inode(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
@@ -2601,7 +2516,6 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
 int btrfs_sync_file(struct file *file, int datasync);
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			    int skip_pinned);
-int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
 extern const struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 		       u64 start, u64 end, u64 *hint_byte, int drop_cache);
@@ -2621,10 +2535,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 /* sysfs.c */
 int btrfs_init_sysfs(void);
 void btrfs_exit_sysfs(void);
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
-int btrfs_sysfs_add_root(struct btrfs_root *root);
-void btrfs_sysfs_del_root(struct btrfs_root *root);
-void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
 
 /* xattr.c */
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

+ 7 - 7
fs/btrfs/delayed-inode.c

@@ -813,7 +813,7 @@ do_again:
 
 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 	if (ret < 0) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		goto insert_end;
 	}
 
@@ -827,7 +827,7 @@ do_again:
 	btrfs_release_delayed_item(prev);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	mutex_unlock(&node->mutex);
 	goto do_again;
 
@@ -925,7 +925,7 @@ do_again:
 		curr = __btrfs_next_delayed_item(prev);
 		btrfs_release_delayed_item(prev);
 		ret = 0;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		if (curr)
 			goto do_again;
 		else
@@ -933,12 +933,12 @@ do_again:
 	}
 
 	btrfs_batch_delete_items(trans, root, path, curr);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	mutex_unlock(&node->mutex);
 	goto do_again;
 
 delete_fail:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	mutex_unlock(&node->mutex);
 	return ret;
 }
@@ -982,7 +982,7 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 	key.offset = 0;
 	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
 	if (ret > 0) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		mutex_unlock(&node->mutex);
 		return -ENOENT;
 	} else if (ret < 0) {
@@ -997,7 +997,7 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
 			    sizeof(struct btrfs_inode_item));
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 	btrfs_delayed_inode_release_metadata(root, node);
 	btrfs_release_delayed_inode(node);

+ 0 - 114
fs/btrfs/delayed-ref.c

@@ -280,44 +280,6 @@ again:
 	return 1;
 }
 
-/*
- * This checks to see if there are any delayed refs in the
- * btree for a given bytenr.  It returns one if it finds any
- * and zero otherwise.
- *
- * If it only finds a head node, it returns 0.
- *
- * The idea is to use this when deciding if you can safely delete an
- * extent from the extent allocation tree.  There may be a pending
- * ref in the rbtree that adds or removes references, so as long as this
- * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
- * allocation tree.
- */
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
-{
-	struct btrfs_delayed_ref_node *ref;
-	struct btrfs_delayed_ref_root *delayed_refs;
-	struct rb_node *prev_node;
-	int ret = 0;
-
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-
-	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
-	if (ref) {
-		prev_node = rb_prev(&ref->rb_node);
-		if (!prev_node)
-			goto out;
-		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
-			       rb_node);
-		if (ref->bytenr == bytenr)
-			ret = 1;
-	}
-out:
-	spin_unlock(&delayed_refs->lock);
-	return ret;
-}
-
 /*
  * helper function to update an extent delayed ref in the
  * rbtree.  existing and update must both have the same
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 		return btrfs_delayed_node_to_head(ref);
 	return NULL;
 }
-
-/*
- * add a delayed ref to the tree.  This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- *
- * The main point of this call is to add and remove a backreference in a single
- * shot, taking the lock only once, and only searching for the head node once.
- *
- * It is the same as doing a ref add and delete in two separate calls.
- */
-#if 0
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
-			  u64 bytenr, u64 num_bytes, u64 orig_parent,
-			  u64 parent, u64 orig_ref_root, u64 ref_root,
-			  u64 orig_ref_generation, u64 ref_generation,
-			  u64 owner_objectid, int pin)
-{
-	struct btrfs_delayed_ref *ref;
-	struct btrfs_delayed_ref *old_ref;
-	struct btrfs_delayed_ref_head *head_ref;
-	struct btrfs_delayed_ref_root *delayed_refs;
-	int ret;
-
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
-	if (!ref)
-		return -ENOMEM;
-
-	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
-	if (!old_ref) {
-		kfree(ref);
-		return -ENOMEM;
-	}
-
-	/*
-	 * the parent = 0 case comes from cases where we don't actually
-	 * know the parent yet.  It will get updated later via a add/drop
-	 * pair.
-	 */
-	if (parent == 0)
-		parent = bytenr;
-	if (orig_parent == 0)
-		orig_parent = bytenr;
-
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
-	if (!head_ref) {
-		kfree(ref);
-		kfree(old_ref);
-		return -ENOMEM;
-	}
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-
-	/*
-	 * insert both the head node and the new ref without dropping
-	 * the spin lock
-	 */
-	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
-				      (u64)-1, 0, 0, 0,
-				      BTRFS_UPDATE_DELAYED_HEAD, 0);
-	BUG_ON(ret);
-
-	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
-				      parent, ref_root, ref_generation,
-				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
-	BUG_ON(ret);
-
-	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
-				      orig_parent, orig_ref_root,
-				      orig_ref_generation, owner_objectid,
-				      BTRFS_DROP_DELAYED_REF, pin);
-	BUG_ON(ret);
-	spin_unlock(&delayed_refs->lock);
-	return 0;
-}
-#endif

+ 0 - 6
fs/btrfs/delayed-ref.h

@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
-			  u64 bytenr, u64 num_bytes, u64 orig_parent,
-			  u64 parent, u64 orig_ref_root, u64 ref_root,
-			  u64 orig_ref_generation, u64 ref_generation,
-			  u64 owner_objectid, int pin);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 			   struct btrfs_delayed_ref_head *head);
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,

+ 1 - 1
fs/btrfs/dir-item.c

@@ -176,7 +176,7 @@ second_insert:
 		ret = 0;
 		goto out_free;
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 	ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
 					      &disk_key, type, index);

+ 21 - 87
fs/btrfs/disk-io.c

@@ -29,6 +29,7 @@
 #include <linux/crc32c.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include "compat.h"
 #include "ctree.h"
@@ -138,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
  * that covers the entire device
  */
 static struct extent_map *btree_get_extent(struct inode *inode,
-		struct page *page, size_t page_offset, u64 start, u64 len,
+		struct page *page, size_t pg_offset, u64 start, u64 len,
 		int create)
 {
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -155,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
 	}
 	read_unlock(&em_tree->lock);
 
-	em = alloc_extent_map(GFP_NOFS);
+	em = alloc_extent_map();
 	if (!em) {
 		em = ERR_PTR(-ENOMEM);
 		goto out;
@@ -255,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 			memcpy(&found, result, csum_size);
 
 			read_extent_buffer(buf, &val, 0, csum_size);
-			if (printk_ratelimit()) {
-				printk(KERN_INFO "btrfs: %s checksum verify "
+			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
 				       "failed on %llu wanted %X found %X "
 				       "level %d\n",
 				       root->fs_info->sb->s_id,
 				       (unsigned long long)buf->start, val, found,
 				       btrfs_header_level(buf));
-			}
 			if (result != (char *)&inline_result)
 				kfree(result);
 			return 1;
@@ -297,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		ret = 0;
 		goto out;
 	}
-	if (printk_ratelimit()) {
-		printk("parent transid verify failed on %llu wanted %llu "
+	printk_ratelimited("parent transid verify failed on %llu wanted %llu "
 		       "found %llu\n",
 		       (unsigned long long)eb->start,
 		       (unsigned long long)parent_transid,
 		       (unsigned long long)btrfs_header_generation(eb));
-	}
 	ret = 1;
 	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
 out:
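The two hunks above (and several more below in this file) replace the open-coded `if (printk_ratelimit()) printk(...)` pattern with printk_ratelimited(), which is why <linux/ratelimit.h> is added to the includes. printk_ratelimit() consults one shared ratelimit state, while printk_ratelimited() keeps its own static state per call site. A minimal sketch of the two forms (report_block() and its argument are hypothetical):

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void report_block(u64 bytenr)
	{
		/* old form: explicit test against the shared ratelimit state */
		if (printk_ratelimit())
			printk(KERN_WARNING "btrfs: problem at block %llu\n",
			       (unsigned long long)bytenr);

		/* new form: one statement, with its own per-call-site state */
		printk_ratelimited(KERN_WARNING "btrfs: problem at block %llu\n",
				   (unsigned long long)bytenr);
	}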
@@ -381,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	len = page->private >> 2;
 	WARN_ON(len == 0);
 
-	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	eb = alloc_extent_buffer(tree, start, len, page);
 	if (eb == NULL) {
 		WARN_ON(1);
 		goto out;
@@ -526,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	len = page->private >> 2;
 	WARN_ON(len == 0);
 
-	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	eb = alloc_extent_buffer(tree, start, len, page);
 	if (eb == NULL) {
 		ret = -EIO;
 		goto out;
@@ -534,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
-		if (printk_ratelimit()) {
-			printk(KERN_INFO "btrfs bad tree block start "
+		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
 			       "%llu %llu\n",
 			       (unsigned long long)found_start,
 			       (unsigned long long)eb->start);
-		}
 		ret = -EIO;
 		goto err;
 	}
@@ -551,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 		goto err;
 	}
 	if (check_tree_block_fsid(root, eb)) {
-		if (printk_ratelimit()) {
-			printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
 			       (unsigned long long)eb->start);
-		}
 		ret = -EIO;
 		goto err;
 	}
@@ -651,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 	return 256 * limit;
 }
 
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
-{
-	return atomic_read(&info->nr_async_bios) >
-		btrfs_async_submit_limit(info);
-}
-
 static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
@@ -964,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
 	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				bytenr, blocksize, GFP_NOFS);
+				bytenr, blocksize);
 	return eb;
 }
 
@@ -975,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 	struct extent_buffer *eb;
 
 	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				 bytenr, blocksize, NULL, GFP_NOFS);
+				 bytenr, blocksize, NULL);
 	return eb;
 }
 
@@ -1082,7 +1069,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	root->log_transid = 0;
 	root->last_log_commit = 0;
 	extent_io_tree_init(&root->dirty_log_pages,
-			     fs_info->btree_inode->i_mapping, GFP_NOFS);
+			     fs_info->btree_inode->i_mapping);
 
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1285,21 +1272,6 @@ out:
 	return root;
 }
 
-struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
-					u64 root_objectid)
-{
-	struct btrfs_root *root;
-
-	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
-		return fs_info->tree_root;
-	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
-		return fs_info->extent_root;
-
-	root = radix_tree_lookup(&fs_info->fs_roots_radix,
-				 (unsigned long)root_objectid);
-	return root;
-}
-
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 					      struct btrfs_key *location)
 {
@@ -1384,41 +1356,6 @@ fail:
 	return ERR_PTR(ret);
 }
 
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
-				      struct btrfs_key *location,
-				      const char *name, int namelen)
-{
-	return btrfs_read_fs_root_no_name(fs_info, location);
-#if 0
-	struct btrfs_root *root;
-	int ret;
-
-	root = btrfs_read_fs_root_no_name(fs_info, location);
-	if (!root)
-		return NULL;
-
-	if (root->in_sysfs)
-		return root;
-
-	ret = btrfs_set_root_name(root, name, namelen);
-	if (ret) {
-		free_extent_buffer(root->node);
-		kfree(root);
-		return ERR_PTR(ret);
-	}
-
-	ret = btrfs_sysfs_add_root(root);
-	if (ret) {
-		free_extent_buffer(root->node);
-		kfree(root->name);
-		kfree(root);
-		return ERR_PTR(ret);
-	}
-	root->in_sysfs = 1;
-	return root;
-#endif
-}
-
 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
 						 GFP_NOFS);
 	struct btrfs_root *tree_root = btrfs_sb(sb);
+	struct btrfs_fs_info *fs_info = NULL;
 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
 						GFP_NOFS);
 	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
 
 
 	struct btrfs_super_block *disk_super;
 
+	if (!extent_root || !tree_root || !tree_root->fs_info ||
 	    !chunk_root || !dev_root || !csum_root) {
 	    !chunk_root || !dev_root || !csum_root) {
 		err = -ENOMEM;
 		goto fail;
 	}
 
 
 	ret = init_srcu_struct(&fs_info->subvol_srcu);
 	if (ret) {
 
 
 	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
 	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
-			     GFP_NOFS);
-	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
-			     GFP_NOFS);
+			     fs_info->btree_inode->i_mapping);
+	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
 
 
 	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
 
 	fs_info->block_group_cache_tree = RB_ROOT;
 	fs_info->block_group_cache_tree = RB_ROOT;
 
 	extent_io_tree_init(&fs_info->freed_extents[0],
+			     fs_info->btree_inode->i_mapping);
 	extent_io_tree_init(&fs_info->freed_extents[1],
 	extent_io_tree_init(&fs_info->freed_extents[1],
+			     fs_info->btree_inode->i_mapping);
 	fs_info->pinned_extents = &fs_info->freed_extents[0];
 	fs_info->pinned_extents = &fs_info->freed_extents[0];
 	fs_info->do_barriers = 1;
 
 	if (uptodate) {
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (printk_ratelimit()) {
-			printk(KERN_WARNING "lost page write due to "
+		printk_ratelimited(KERN_WARNING "lost page write due to "
 					"I/O error on %s\n",
 				       bdevname(bh->b_bdev, b));
-		}
 		/* note, we dont' set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		goto out;
 		goto out;
 
 	len = page->private >> 2;
+	eb = find_extent_buffer(io_tree, bytenr, len);
 	if (!eb)
 	if (!eb)
 		goto out;
 
+ 0 - 18
fs/btrfs/disk-io.h

@@ -55,36 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
 int btrfs_error_commit_super(struct btrfs_root *root);
 int btrfs_error_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize);
-					u64 root_objectid);
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
-				      struct btrfs_key *location,
-				      const char *name, int namelen);
 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
 					       struct btrfs_key *location);
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 					      struct btrfs_key *location);
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-			   struct block_device *bdev,
-			   u64 device_id,
-			   u64 block_start,
-			   u64 num_blocks);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
-				 struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, char *result);
-int btrfs_verify_block_csum(struct btrfs_root *root,
-			    struct extent_buffer *buf);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			int metadata);
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			unsigned long bio_flags, u64 bio_offset,
 			unsigned long bio_flags, u64 bio_offset,
 			extent_submit_bio_hook_t *submit_bio_start,
 			extent_submit_bio_hook_t *submit_bio_done);
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
Diff too large to display
+ 35 - 1682
fs/btrfs/extent-tree.c


+ 19 - 248
fs/btrfs/extent_io.c

@@ -101,7 +101,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-			  struct address_space *mapping, gfp_t mask)
+			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
 	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 			      NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-				NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask)
 {
@@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 				cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
@@ -1027,25 +1015,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 				mask);
 }
 
-/*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page);
-		__set_page_dirty_nobuffers(page);
-		page_cache_release(page);
-		index++;
-	}
-	return 0;
-}
-
 /*
  * helper function to set both pages and extents in the tree writeback
  */
@@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_io_tree *tree;
-	u64 start;
-	u64 end;
-
-	do {
-		struct page *page = bvec->bv_page;
-		struct extent_state *cached = NULL;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			set_extent_uptodate(tree, start, end, &cached,
-					    GFP_ATOMIC);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-
-		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-	} while (bvec >= bio->bi_io_vec);
-
-	bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags)
@@ -2007,7 +1936,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct btrfs_ordered_extent *ordered;
 	int ret;
 	int nr = 0;
-	size_t page_offset = 0;
+	size_t pg_offset = 0;
 	size_t iosize;
 	size_t disk_io_size;
 	size_t blocksize = inode->i_sb->s_blocksize;
@@ -2043,9 +1972,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			iosize = PAGE_CACHE_SIZE - page_offset;
+			iosize = PAGE_CACHE_SIZE - pg_offset;
 			userpage = kmap_atomic(page, KM_USER0);
-			memset(userpage + page_offset, 0, iosize);
+			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2054,9 +1983,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 					     &cached, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, page_offset, cur,
+		em = get_extent(inode, page, pg_offset, cur,
 				end - cur + 1, 0);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end, GFP_NOFS);
 			break;
@@ -2094,7 +2023,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			struct extent_state *cached = NULL;
 			struct extent_state *cached = NULL;
 
 
 			userpage = kmap_atomic(page, KM_USER0);
 			userpage = kmap_atomic(page, KM_USER0);
-			memset(userpage + page_offset, 0, iosize);
+			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 			kunmap_atomic(userpage, KM_USER0);
 
 
@@ -2103,7 +2032,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			unlock_extent_cached(tree, cur, cur + iosize - 1,
 			unlock_extent_cached(tree, cur, cur + iosize - 1,
 			                     &cached, GFP_NOFS);
 			                     &cached, GFP_NOFS);
 			cur = cur + iosize;
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 			continue;
 		}
 		}
 		/* the get_extent function already copied into the page */
 		/* the get_extent function already copied into the page */
@@ -2112,7 +2041,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			check_page_uptodate(tree, page);
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 			continue;
 		}
 		}
 		/* we have an inline extent but it didn't get marked up
 		/* we have an inline extent but it didn't get marked up
@@ -2122,7 +2051,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			SetPageError(page);
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 			continue;
 		}
 		}
 
 
@@ -2135,7 +2064,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
 			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
 			pnr -= page->index;
 			pnr -= page->index;
 			ret = submit_extent_page(READ, tree, page,
 			ret = submit_extent_page(READ, tree, page,
-					 sector, disk_io_size, page_offset,
+					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
 					 *bio_flags,
@@ -2146,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (ret)
 		if (ret)
 			SetPageError(page);
 			SetPageError(page);
 		cur = cur + iosize;
 		cur = cur + iosize;
-		page_offset += iosize;
+		pg_offset += iosize;
 	}
 	}
 	if (!nr) {
 	if (!nr) {
 		if (!PageError(page))
 		if (!PageError(page))
@@ -2341,7 +2270,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		}
 		}
 		em = epd->get_extent(inode, page, pg_offset, cur,
 		em = epd->get_extent(inode, page, pg_offset, cur,
 				     end - cur + 1, 1);
 				     end - cur + 1, 1);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			SetPageError(page);
 			break;
 			break;
 		}
 		}
@@ -2719,128 +2648,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	return 0;
 	return 0;
 }
 }
 
 
-/*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-			struct inode *inode, struct page *page,
-			unsigned from, unsigned to)
-{
-	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-	set_page_extent_mapped(page);
-	set_page_dirty(page);
-
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-			 struct inode *inode, struct page *page,
-			 unsigned from, unsigned to, get_extent_t *get_extent)
-{
-	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-	u64 block_start;
-	u64 orig_block_start;
-	u64 block_end;
-	u64 cur_end;
-	struct extent_map *em;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	size_t page_offset = 0;
-	size_t block_off_start;
-	size_t block_off_end;
-	int err = 0;
-	int iocount = 0;
-	int ret = 0;
-	int isnew;
-
-	set_page_extent_mapped(page);
-
-	block_start = (page_start + from) & ~((u64)blocksize - 1);
-	block_end = (page_start + to - 1) | (blocksize - 1);
-	orig_block_start = block_start;
-
-	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while (block_start <= block_end) {
-		em = get_extent(inode, page, page_offset, block_start,
-				block_end - block_start + 1, 1);
-		if (IS_ERR(em) || !em)
-			goto err;
-
-		cur_end = min(block_end, extent_map_end(em) - 1);
-		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-		block_off_end = block_off_start + blocksize;
-		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-		if (!PageUptodate(page) && isnew &&
-		    (block_off_end > to || block_off_start < from)) {
-			void *kaddr;
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_off_end > to)
-				memset(kaddr + to, 0, block_off_end - to);
-			if (block_off_start < from)
-				memset(kaddr + block_off_start, 0,
-				       from - block_off_start);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if ((em->block_start != EXTENT_MAP_HOLE &&
-		     em->block_start != EXTENT_MAP_INLINE) &&
-		    !isnew && !PageUptodate(page) &&
-		    (block_off_end > to || block_off_start < from) &&
-		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1, NULL)) {
-			u64 sector;
-			u64 extent_offset = block_start - em->start;
-			size_t iosize;
-			sector = (em->block_start + extent_offset) >> 9;
-			iosize = (cur_end - block_start + blocksize) &
-				~((u64)blocksize - 1);
-			/*
-			 * we've already got the extent locked, but we
-			 * need to split the state such that our end_bio
-			 * handler can clear the lock.
-			 */
-			set_extent_bit(tree, block_start,
-				       block_start + iosize - 1,
-				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-			ret = submit_extent_page(READ, tree, page,
-					 sector, iosize, page_offset, em->bdev,
-					 NULL, 1,
-					 end_bio_extent_preparewrite, 0,
-					 0, 0);
-			if (ret && !err)
-				err = ret;
-			iocount++;
-			block_start = block_start + iosize;
-		} else {
-			struct extent_state *cached = NULL;
-
-			set_extent_uptodate(tree, block_start, cur_end, &cached,
-					    GFP_NOFS);
-			unlock_extent_cached(tree, block_start, cur_end,
-					     &cached, GFP_NOFS);
-			block_start = cur_end + 1;
-		}
-		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
-		free_extent_map(em);
-	}
-	if (iocount) {
-		wait_extent_bit(tree, orig_block_start,
-				block_end, EXTENT_LOCKED);
-	}
-	check_page_uptodate(tree, page);
-err:
-	/* FIXME, zero out newly allocated blocks on error */
-	return err;
-}
-
 /*
 /*
  * a helper for releasepage, this tests for areas of the page that
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * are locked or under IO and drops the related state bits if it is safe
@@ -2899,7 +2706,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			len = end - start + 1;
 			len = end - start + 1;
 			write_lock(&map->lock);
 			write_lock(&map->lock);
 			em = lookup_extent_mapping(map, start, len);
 			em = lookup_extent_mapping(map, start, len);
-			if (!em || IS_ERR(em)) {
+			if (IS_ERR_OR_NULL(em)) {
 				write_unlock(&map->lock);
 				write_unlock(&map->lock);
 				break;
 				break;
 			}
 			}
@@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	return try_release_extent_state(map, tree, page, mask);
 	return try_release_extent_state(map, tree, page, mask);
 }
 }
 
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-		get_extent_t *get_extent)
-{
-	struct inode *inode = mapping->host;
-	struct extent_state *cached_state = NULL;
-	u64 start = iblock << inode->i_blkbits;
-	sector_t sector = 0;
-	size_t blksize = (1 << inode->i_blkbits);
-	struct extent_map *em;
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-			 0, &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-			     start + blksize - 1, &cached_state, GFP_NOFS);
-	if (!em || IS_ERR(em))
-		return 0;
-
-	if (em->block_start > EXTENT_MAP_LAST_BYTE)
-		goto out;
-
-	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-	free_extent_map(em);
-	return sector;
-}
-
 /*
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
  * This maps until we find something past 'last'
@@ -2976,7 +2756,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 			break;
 			break;
 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
-		if (!em || IS_ERR(em))
+		if (IS_ERR_OR_NULL(em))
 			return em;
 			return em;
 
 
 		/* if this isn't a hole return it */
 		/* if this isn't a hole return it */
@@ -3266,8 +3046,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
 					  u64 start, unsigned long len,
-					  struct page *page0,
-					  gfp_t mask)
+					  struct page *page0)
 {
 {
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
 	unsigned long i;
@@ -3288,7 +3067,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	}
 	}
 	rcu_read_unlock();
 	rcu_read_unlock();
 
 
-	eb = __alloc_extent_buffer(tree, start, len, mask);
+	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
 	if (!eb)
 	if (!eb)
 		return NULL;
 		return NULL;
 
 
@@ -3305,7 +3084,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 		i = 0;
 	}
 	}
 	for (; i < num_pages; i++, index++) {
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
 		if (!p) {
 		if (!p) {
 			WARN_ON(1);
 			WARN_ON(1);
 			goto free_eb;
 			goto free_eb;
@@ -3377,8 +3156,7 @@ free_eb:
 }
 }
 
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len,
-					  gfp_t mask)
+					 u64 start, unsigned long len)
 {
 {
 	struct extent_buffer *eb;
 	struct extent_buffer *eb;
 
 
@@ -3439,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 	return 0;
 	return 0;
 }
 }
 
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-				    struct extent_buffer *eb)
-{
-	return wait_on_extent_writeback(tree, eb->start,
-					eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			     struct extent_buffer *eb)
 			     struct extent_buffer *eb)
 {
 {
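A note on the error-check conversion repeated in the extent_io.c hunks above: IS_ERR_OR_NULL() from <linux/err.h> folds the old two-part "IS_ERR(em) || !em" test into one macro. A minimal sketch of the pattern, assuming btrfs's struct extent_map; the helper name and the -ENOENT fallback are illustrative, not taken from this commit:

	#include <linux/err.h>
	#include "extent_map.h"	/* struct extent_map */

	/* Map an extent lookup result to 0 or a negative errno. */
	static int extent_result_to_errno(struct extent_map *em)
	{
		if (IS_ERR_OR_NULL(em))
			return em ? PTR_ERR(em) : -ENOENT;
		return 0;
	}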

+ 4 - 36
fs/btrfs/extent_io.h

@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
 
 
 struct extent_map_tree;
 struct extent_map_tree;
 
 
-static inline struct extent_state *extent_state_next(struct extent_state *state)
-{
-	struct rb_node *node;
-	node = rb_next(&state->rb_node);
-	if (!node)
-		return NULL;
-	return rb_entry(node, struct extent_state, rb_node);
-}
-
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
 					  struct page *page,
 					  struct page *page,
-					  size_t page_offset,
+					  size_t pg_offset,
 					  u64 start, u64 len,
 					  u64 start, u64 len,
 					  int create);
 					  int create);
 
 
 void extent_io_tree_init(struct extent_io_tree *tree,
 void extent_io_tree_init(struct extent_io_tree *tree,
-			  struct address_space *mapping, gfp_t mask);
+			 struct address_space *mapping);
 int try_release_extent_mapping(struct extent_map_tree *map,
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
 			       gfp_t mask);
@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		     gfp_t mask);
 		     gfp_t mask);
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask);
 		       gfp_t mask);
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask);
-int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
-				  u64 end, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
 			struct extent_state **cached_state, gfp_t mask);
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-		     gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 			  u64 *start_ret, u64 *end_ret, int bits);
 			  u64 *start_ret, u64 *end_ret, int bits);
 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
 		     struct address_space *mapping,
 		     struct address_space *mapping,
 		     struct list_head *pages, unsigned nr_pages,
 		     struct list_head *pages, unsigned nr_pages,
 		     get_extent_t get_extent);
 		     get_extent_t get_extent);
-int extent_prepare_write(struct extent_io_tree *tree,
-			 struct inode *inode, struct page *page,
-			 unsigned from, unsigned to, get_extent_t *get_extent);
-int extent_commit_write(struct extent_io_tree *tree,
-			struct inode *inode, struct page *page,
-			unsigned from, unsigned to);
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-		get_extent_t *get_extent);
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent);
 		__u64 start, __u64 len, get_extent_t *get_extent);
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 void set_page_extent_mapped(struct page *page);
 
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
 					  u64 start, unsigned long len,
-					  struct page *page0,
-					  gfp_t mask);
+					  struct page *page0);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len,
-					  gfp_t mask);
+					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
 void free_extent_buffer(struct extent_buffer *eb);
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     struct extent_buffer *eb, u64 start, int wait,
@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len);
 			   unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 			  unsigned long start, unsigned long len);
 			  unsigned long start, unsigned long len);
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-				    struct extent_buffer *eb);
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			      struct extent_buffer *eb);
 			      struct extent_buffer *eb);
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			     struct extent_buffer *eb);
 			     struct extent_buffer *eb);
-int test_extent_buffer_dirty(struct extent_io_tree *tree,
-			     struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 			       struct extent_buffer *eb);
 			       struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 		      unsigned long *map_start,
 		      unsigned long *map_start,
 		      unsigned long *map_len, int km);
 		      unsigned long *map_len, int km);
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
-int release_extent_buffer_tail_pages(struct extent_buffer *eb);
 int extent_range_uptodate(struct extent_io_tree *tree,
 int extent_range_uptodate(struct extent_io_tree *tree,
 			  u64 start, u64 end);
 			  u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
 int extent_clear_unlock_delalloc(struct inode *inode,
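As background for the typedef above (whose page_offset parameter is renamed to pg_offset here): get_extent_t is the callback the readpage/writepage paths use to map a file range to an extent_map, with btrfs_get_extent() as the real implementation. A skeletal conformer, purely illustrative and not part of this commit, that simply reports the whole range as a hole:

	static struct extent_map *toy_get_extent(struct inode *inode, struct page *page,
						 size_t pg_offset, u64 start, u64 len,
						 int create)
	{
		struct extent_map *em = alloc_extent_map();	/* no-argument form after this series */

		if (!em)
			return ERR_PTR(-ENOMEM);
		em->start = start;
		em->len = len;
		em->block_start = EXTENT_MAP_HOLE;	/* pretend it is all hole */
		return em;
	}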

+ 3 - 5
fs/btrfs/extent_map.c

@@ -28,12 +28,11 @@ void extent_map_exit(void)
 /**
 /**
  * extent_map_tree_init - initialize extent map tree
  * extent_map_tree_init - initialize extent map tree
  * @tree:		tree to initialize
  * @tree:		tree to initialize
- * @mask:		flags for memory allocations during tree operations
  *
  *
  * Initialize the extent tree @tree.  Should be called for each new inode
  * Initialize the extent tree @tree.  Should be called for each new inode
  * or other user of the extent_map interface.
  * or other user of the extent_map interface.
  */
  */
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
+void extent_map_tree_init(struct extent_map_tree *tree)
 {
 {
 	tree->map = RB_ROOT;
 	tree->map = RB_ROOT;
 	rwlock_init(&tree->lock);
 	rwlock_init(&tree->lock);
@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
 
 
 /**
 /**
  * alloc_extent_map - allocate new extent map structure
  * alloc_extent_map - allocate new extent map structure
- * @mask:	memory allocation flags
  *
  *
  * Allocate a new extent_map structure.  The new structure is
  * Allocate a new extent_map structure.  The new structure is
  * returned with a reference count of one and needs to be
  * returned with a reference count of one and needs to be
  * freed using free_extent_map()
  * freed using free_extent_map()
  */
  */
-struct extent_map *alloc_extent_map(gfp_t mask)
+struct extent_map *alloc_extent_map(void)
 {
 {
 	struct extent_map *em;
 	struct extent_map *em;
-	em = kmem_cache_alloc(extent_map_cache, mask);
+	em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
 	if (!em)
 	if (!em)
 		return NULL;
 		return NULL;
 	em->in_tree = 0;
 	em->in_tree = 0;

+ 2 - 2
fs/btrfs/extent_map.h

@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
 	return em->block_start + em->block_len;
 	return em->block_start + em->block_len;
 }
 }
 
 
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
+void extent_map_tree_init(struct extent_map_tree *tree);
 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 					 u64 start, u64 len);
 					 u64 start, u64 len);
 int add_extent_mapping(struct extent_map_tree *tree,
 int add_extent_mapping(struct extent_map_tree *tree,
 		       struct extent_map *em);
 		       struct extent_map *em);
 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
 
 
-struct extent_map *alloc_extent_map(gfp_t mask);
+struct extent_map *alloc_extent_map(void);
 void free_extent_map(struct extent_map *em);
 void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 int __init extent_map_init(void);
 void extent_map_exit(void);
 void extent_map_exit(void);
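The extent_map changes above (and the matching extent_buffer helpers in extent_io.h) drop their gfp_t arguments and use GFP_NOFS internally. A hedged call-site sketch of the new prototypes; the wrapper function itself is invented for illustration:

	#include "extent_map.h"

	static int example_map_setup(struct extent_map_tree *tree)
	{
		struct extent_map *em;

		extent_map_tree_init(tree);	/* mask argument is gone */
		em = alloc_extent_map();	/* allocates with GFP_NOFS internally */
		if (!em)
			return -ENOMEM;

		/* ... fill in em->start / em->len, add_extent_mapping() under tree->lock ... */

		free_extent_map(em);
		return 0;
	}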

+ 6 - 6
fs/btrfs/file-item.c

@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 			u32 item_size;
 			u32 item_size;
 
 
 			if (item)
 			if (item)
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
 			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
 						 path, disk_bytenr, 0);
 						 path, disk_bytenr, 0);
 			if (IS_ERR(item)) {
 			if (IS_ERR(item)) {
@@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 					       (unsigned long long)offset);
 					       (unsigned long long)offset);
 				}
 				}
 				item = NULL;
 				item = NULL;
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 				goto found;
 				goto found;
 			}
 			}
 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
@@ -632,7 +632,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 			if (key.offset < bytenr)
 			if (key.offset < bytenr)
 				break;
 				break;
 		}
 		}
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 	}
 	}
 out:
 out:
 	btrfs_free_path(path);
 	btrfs_free_path(path);
@@ -723,7 +723,7 @@ again:
 	 * at this point, we know the tree has an item, but it isn't big
 	 * at this point, we know the tree has an item, but it isn't big
 	 * enough yet to put our csum in.  Grow it
 	 * enough yet to put our csum in.  Grow it
 	 */
 	 */
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	ret = btrfs_search_slot(trans, root, &file_key, path,
 	ret = btrfs_search_slot(trans, root, &file_key, path,
 				csum_size, 1);
 				csum_size, 1);
 	if (ret < 0)
 	if (ret < 0)
@@ -767,7 +767,7 @@ again:
 	}
 	}
 
 
 insert:
 insert:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	csum_offset = 0;
 	csum_offset = 0;
 	if (found_next) {
 	if (found_next) {
 		u64 tmp = total_bytes + root->sectorsize;
 		u64 tmp = total_bytes + root->sectorsize;
@@ -851,7 +851,7 @@ next_sector:
 	}
 	}
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	if (total_bytes < sums->len) {
 	if (total_bytes < sums->len) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		cond_resched();
 		cond_resched();
 		goto again;
 		goto again;
 	}
 	}
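The btrfs_release_path() conversions that run through this commit drop the now-unneeded root argument; the path alone is enough. A small sketch of the resulting lookup/release shape, with the wrapper function invented for illustration and the ret > 0 "not found" case left unhandled:

	#include "ctree.h"

	static int example_lookup(struct btrfs_root *root, struct btrfs_key *key)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
		if (ret < 0)
			goto out;

		/* ... read items out of path->nodes[0] ... */

		btrfs_release_path(path);	/* single-argument form after this commit */
	out:
		btrfs_free_path(path);
		return ret;
	}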

+ 9 - 9
fs/btrfs/file.c

@@ -191,9 +191,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 	}
 	}
 	while (1) {
 	while (1) {
 		if (!split)
 		if (!split)
-			split = alloc_extent_map(GFP_NOFS);
+			split = alloc_extent_map();
 		if (!split2)
 		if (!split2)
-			split2 = alloc_extent_map(GFP_NOFS);
+			split2 = alloc_extent_map();
 		BUG_ON(!split || !split2);
 		BUG_ON(!split || !split2);
 
 
 		write_lock(&em_tree->lock);
 		write_lock(&em_tree->lock);
@@ -377,7 +377,7 @@ next_slot:
 
 
 		search_start = max(key.offset, start);
 		search_start = max(key.offset, start);
 		if (recow) {
 		if (recow) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			continue;
 			continue;
 		}
 		}
 
 
@@ -394,7 +394,7 @@ next_slot:
 			ret = btrfs_duplicate_item(trans, root, path,
 			ret = btrfs_duplicate_item(trans, root, path,
 						   &new_key);
 						   &new_key);
 			if (ret == -EAGAIN) {
 			if (ret == -EAGAIN) {
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 				continue;
 				continue;
 			}
 			}
 			if (ret < 0)
 			if (ret < 0)
@@ -517,7 +517,7 @@ next_slot:
 			del_nr = 0;
 			del_nr = 0;
 			del_slot = 0;
 			del_slot = 0;
 
 
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			continue;
 			continue;
 		}
 		}
 
 
@@ -682,7 +682,7 @@ again:
 		new_key.offset = split;
 		new_key.offset = split;
 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
 		if (ret == -EAGAIN) {
 		if (ret == -EAGAIN) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto again;
 			goto again;
 		}
 		}
 		BUG_ON(ret < 0);
 		BUG_ON(ret < 0);
@@ -722,7 +722,7 @@ again:
 			     ino, bytenr, orig_offset,
 			     ino, bytenr, orig_offset,
 			     &other_start, &other_end)) {
 			     &other_start, &other_end)) {
 		if (recow) {
 		if (recow) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto again;
 			goto again;
 		}
 		}
 		extent_end = other_end;
 		extent_end = other_end;
@@ -739,7 +739,7 @@ again:
 			     ino, bytenr, orig_offset,
 			     ino, bytenr, orig_offset,
 			     &other_start, &other_end)) {
 			     &other_start, &other_end)) {
 		if (recow) {
 		if (recow) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto again;
 			goto again;
 		}
 		}
 		key.offset = other_start;
 		key.offset = other_start;
@@ -1376,7 +1376,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	while (1) {
 	while (1) {
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 				      alloc_end - cur_offset, 0);
 				      alloc_end - cur_offset, 0);
-		BUG_ON(IS_ERR(em) || !em);
+		BUG_ON(IS_ERR_OR_NULL(em));
 		last_byte = min(extent_map_end(em), alloc_end);
 		last_byte = min(extent_map_end(em), alloc_end);
 		last_byte = (last_byte + mask) & ~mask;
 		last_byte = (last_byte + mask) & ~mask;
 		if (em->block_start == EXTENT_MAP_HOLE ||
 		if (em->block_start == EXTENT_MAP_HOLE ||

+ 10 - 13
fs/btrfs/free-space-cache.c

@@ -53,7 +53,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	if (ret < 0)
 	if (ret < 0)
 		return ERR_PTR(ret);
 		return ERR_PTR(ret);
 	if (ret > 0) {
 	if (ret > 0) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		return ERR_PTR(-ENOENT);
 		return ERR_PTR(-ENOENT);
 	}
 	}
 
 
@@ -62,7 +62,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 				struct btrfs_free_space_header);
 				struct btrfs_free_space_header);
 	btrfs_free_space_key(leaf, header, &disk_key);
 	btrfs_free_space_key(leaf, header, &disk_key);
 	btrfs_disk_key_to_cpu(&location, &disk_key);
 	btrfs_disk_key_to_cpu(&location, &disk_key);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
 	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
 	if (!inode)
 	if (!inode)
@@ -140,7 +140,7 @@ int __create_free_space_inode(struct btrfs_root *root,
 	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
 	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
 	btrfs_set_inode_block_group(leaf, inode_item, offset);
 	btrfs_set_inode_block_group(leaf, inode_item, offset);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
 	key.offset = offset;
@@ -149,7 +149,7 @@ int __create_free_space_inode(struct btrfs_root *root,
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      sizeof(struct btrfs_free_space_header));
 				      sizeof(struct btrfs_free_space_header));
 	if (ret < 0) {
 	if (ret < 0) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		return ret;
 		return ret;
 	}
 	}
 	leaf = path->nodes[0];
 	leaf = path->nodes[0];
@@ -158,7 +158,7 @@ int __create_free_space_inode(struct btrfs_root *root,
 	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
 	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
 	btrfs_set_free_space_key(leaf, header, &disk_key);
 	btrfs_set_free_space_key(leaf, header, &disk_key);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -266,7 +266,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	if (ret < 0)
 	if (ret < 0)
 		goto out;
 		goto out;
 	else if (ret > 0) {
 	else if (ret > 0) {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		ret = 0;
 		ret = 0;
 		goto out;
 		goto out;
 	}
 	}
@@ -279,7 +279,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	num_entries = btrfs_free_space_entries(leaf, header);
 	num_entries = btrfs_free_space_entries(leaf, header);
 	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
 	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
 	generation = btrfs_free_space_generation(leaf, header);
 	generation = btrfs_free_space_generation(leaf, header);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	if (BTRFS_I(inode)->generation != generation) {
 	if (BTRFS_I(inode)->generation != generation) {
 		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
 		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
@@ -842,7 +842,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 					 EXTENT_DIRTY | EXTENT_DELALLOC |
 					 EXTENT_DIRTY | EXTENT_DELALLOC |
 					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
 					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
 					 GFP_NOFS);
 					 GFP_NOFS);
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto out_free;
 			goto out_free;
 		}
 		}
 	}
 	}
@@ -852,7 +852,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
 	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
 	btrfs_set_free_space_generation(leaf, header, trans->transid);
 	btrfs_set_free_space_generation(leaf, header, trans->transid);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ret = 1;
 	ret = 1;
 
 
@@ -1504,7 +1504,7 @@ out:
 	return ret;
 	return ret;
 }
 }
 
 
-bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
+static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
 			  struct btrfs_free_space *info, bool update_stat)
 			  struct btrfs_free_space *info, bool update_stat)
 {
 {
 	struct btrfs_free_space *left_info;
 	struct btrfs_free_space *left_info;
@@ -1984,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 	while(1) {
 	while(1) {
 		if (entry->bytes < bytes ||
 		if (entry->bytes < bytes ||
 		    (!entry->bitmap && entry->offset < min_start)) {
 		    (!entry->bitmap && entry->offset < min_start)) {
-			struct rb_node *node;
-
 			node = rb_next(&entry->offset_index);
 			node = rb_next(&entry->offset_index);
 			if (!node)
 			if (!node)
 				break;
 				break;
@@ -1999,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 						      cluster, entry, bytes,
 						      cluster, entry, bytes,
 						      min_start);
 						      min_start);
 			if (ret == 0) {
 			if (ret == 0) {
-				struct rb_node *node;
 				node = rb_next(&entry->offset_index);
 				node = rb_next(&entry->offset_index);
 				if (!node)
 				if (!node)
 					break;
 					break;
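The two free-space-cache.c hunks just above drop inner "struct rb_node *node;" declarations in favour of a single node variable in the enclosing scope. Shadowing like that is a classic source of lost-update bugs, which is presumably why the cleanup removes it; a stripped-down illustration of the pitfall, where fits() and the function name are stand-ins rather than btrfs code:

	#include <linux/rbtree.h>

	static int fits(struct rb_node *node, int want);	/* stand-in predicate, assumed to exist */

	static struct rb_node *next_fitting(struct rb_node *node, int want)
	{
		while (node) {
			if (!fits(node, want)) {
				/* An inner "struct rb_node *node;" declared in this block
				 * would shadow the parameter: the advance below would then
				 * update only the inner copy, and the while test would
				 * never see it. */
				node = rb_next(node);
				continue;
			}
			return node;
		}
		return NULL;
	}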

+ 1 - 1
fs/btrfs/inode-map.c

@@ -86,7 +86,7 @@ again:
 				 * in the next search.
 				 * in the next search.
 				 */
 				 */
 				btrfs_item_key_to_cpu(leaf, &key, 0);
 				btrfs_item_key_to_cpu(leaf, &key, 0);
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 				root->cache_progress = last;
 				root->cache_progress = last;
 				mutex_unlock(&root->fs_commit_mutex);
 				mutex_unlock(&root->fs_commit_mutex);
 				schedule_timeout(1);
 				schedule_timeout(1);

+ 34 - 263
fs/btrfs/inode.c

@@ -37,6 +37,7 @@
 #include <linux/posix_acl.h>
 #include <linux/posix_acl.h>
 #include <linux/falloc.h>
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "compat.h"
 #include "ctree.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "disk-io.h"
@@ -650,7 +651,7 @@ retry:
 					async_extent->start +
 					async_extent->start +
 					async_extent->ram_size - 1, 0);
 					async_extent->ram_size - 1, 0);
 
 
-		em = alloc_extent_map(GFP_NOFS);
+		em = alloc_extent_map();
 		BUG_ON(!em);
 		BUG_ON(!em);
 		em->start = async_extent->start;
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
 		em->len = async_extent->ram_size;
@@ -836,7 +837,7 @@ static noinline int cow_file_range(struct inode *inode,
 					   (u64)-1, &ins, 1);
 					   (u64)-1, &ins, 1);
 		BUG_ON(ret);
 		BUG_ON(ret);
 
 
-		em = alloc_extent_map(GFP_NOFS);
+		em = alloc_extent_map();
 		BUG_ON(!em);
 		BUG_ON(!em);
 		em->start = start;
 		em->start = start;
 		em->orig_start = em->start;
 		em->orig_start = em->start;
@@ -1176,7 +1177,7 @@ out_check:
 			goto next_slot;
 			goto next_slot;
 		}
 		}
 
 
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		if (cow_start != (u64)-1) {
 		if (cow_start != (u64)-1) {
 			ret = cow_file_range(inode, locked_page, cow_start,
 			ret = cow_file_range(inode, locked_page, cow_start,
 					found_key.offset - 1, page_started,
 					found_key.offset - 1, page_started,
@@ -1189,7 +1190,7 @@ out_check:
 			struct extent_map *em;
 			struct extent_map *em;
 			struct extent_map_tree *em_tree;
 			struct extent_map_tree *em_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
-			em = alloc_extent_map(GFP_NOFS);
+			em = alloc_extent_map();
 			BUG_ON(!em);
 			BUG_ON(!em);
 			em->start = cur_offset;
 			em->start = cur_offset;
 			em->orig_start = em->start;
 			em->orig_start = em->start;
@@ -1234,7 +1235,7 @@ out_check:
 		if (cur_offset > end)
 		if (cur_offset > end)
 			break;
 			break;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	if (cur_offset <= end && cow_start == (u64)-1)
 	if (cur_offset <= end && cow_start == (u64)-1)
 		cow_start = cur_offset;
 		cow_start = cur_offset;
@@ -1865,7 +1866,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
 		}
 		}
 		read_unlock(&em_tree->lock);
 		read_unlock(&em_tree->lock);
 
 
-		if (!em || IS_ERR(em)) {
+		if (IS_ERR_OR_NULL(em)) {
 			kfree(failrec);
 			kfree(failrec);
 			return -EIO;
 			return -EIO;
 		}
 		}
@@ -2014,13 +2015,11 @@ good:
 	return 0;
 	return 0;
 
 
 zeroit:
 zeroit:
-	if (printk_ratelimit()) {
-		printk(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
+	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
 		       "private %llu\n",
 		       "private %llu\n",
 		       (unsigned long long)btrfs_ino(page->mapping->host),
 		       (unsigned long long)btrfs_ino(page->mapping->host),
 		       (unsigned long long)start, csum,
 		       (unsigned long long)start, csum,
 		       (unsigned long long)private);
 		       (unsigned long long)private);
-	}
 	memset(kaddr + offset, 1, end - start + 1);
 	memset(kaddr + offset, 1, end - start + 1);
 	flush_dcache_page(page);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_USER0);
 	kunmap_atomic(kaddr, KM_USER0);
@@ -2357,7 +2356,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 			break;
 			break;
 
 
 		/* release the path since we're done with it */
 		/* release the path since we're done with it */
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 
 		/*
 		/*
 		 * this is where we are basically btrfs_lookup, without the
 		 * this is where we are basically btrfs_lookup, without the
@@ -2740,7 +2739,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
 	if (ret)
 	if (ret)
 		goto err;
 		goto err;
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 				  dir_ino, &index);
 				  dir_ino, &index);
@@ -2882,7 +2881,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	} else {
 	} else {
 		check_link = 0;
 		check_link = 0;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ret = btrfs_lookup_inode(trans, root, path,
 	ret = btrfs_lookup_inode(trans, root, path,
 				&BTRFS_I(inode)->location, 0);
 				&BTRFS_I(inode)->location, 0);
@@ -2896,7 +2895,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	} else {
 	} else {
 		check_link = 0;
 		check_link = 0;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	if (ret == 0 && S_ISREG(inode->i_mode)) {
 	if (ret == 0 && S_ISREG(inode->i_mode)) {
 		ret = btrfs_lookup_file_extent(trans, root, path,
 		ret = btrfs_lookup_file_extent(trans, root, path,
@@ -2908,7 +2907,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 		BUG_ON(ret == 0);
 		BUG_ON(ret == 0);
 		if (check_path_shared(root, path))
 		if (check_path_shared(root, path))
 			goto out;
 			goto out;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 	}
 	}
 
 
 	if (!check_link) {
 	if (!check_link) {
@@ -2929,7 +2928,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 		err = 0;
 		err = 0;
 		goto out;
 		goto out;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ref = btrfs_lookup_inode_ref(trans, root, path,
 	ref = btrfs_lookup_inode_ref(trans, root, path,
 				dentry->d_name.name, dentry->d_name.len,
 				dentry->d_name.name, dentry->d_name.len,
@@ -2942,7 +2941,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	if (check_path_shared(root, path))
 	if (check_path_shared(root, path))
 		goto out;
 		goto out;
 	index = btrfs_inode_ref_index(path->nodes[0], ref);
 	index = btrfs_inode_ref_index(path->nodes[0], ref);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	/*
 	/*
 	 * This is a commit root search, if we can lookup inode item and other
 	 * This is a commit root search, if we can lookup inode item and other
@@ -3035,14 +3034,14 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 
 
 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
 				   name, name_len, -1);
 				   name, name_len, -1);
-	BUG_ON(!di || IS_ERR(di));
+	BUG_ON(IS_ERR_OR_NULL(di));
 
 
 	leaf = path->nodes[0];
 	leaf = path->nodes[0];
 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
 	BUG_ON(ret);
 	BUG_ON(ret);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
 				 objectid, root->root_key.objectid,
 				 objectid, root->root_key.objectid,
@@ -3051,14 +3050,14 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		BUG_ON(ret != -ENOENT);
 		BUG_ON(ret != -ENOENT);
 		di = btrfs_search_dir_index_item(root, path, dir_ino,
 		di = btrfs_search_dir_index_item(root, path, dir_ino,
 						 name, name_len);
 						 name, name_len);
-		BUG_ON(!di || IS_ERR(di));
+		BUG_ON(IS_ERR_OR_NULL(di));
 
 
 		leaf = path->nodes[0];
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		index = key.offset;
 		index = key.offset;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
 	BUG_ON(ret);
 	BUG_ON(ret);
@@ -3114,178 +3113,6 @@ out:
 	return err;
 	return err;
 }
 }
 
 
-#if 0
-/*
- * when truncating bytes in a file, it is possible to avoid reading
- * the leaves that contain only checksum items.  This can be the
- * majority of the IO required to delete a large file, but it must
- * be done carefully.
- *
- * The keys in the level just above the leaves are checked to make sure
- * the lowest key in a given leaf is a csum key, and starts at an offset
- * after the new  size.
- *
- * Then the key for the next leaf is checked to make sure it also has
- * a checksum item for the same file.  If it does, we know our target leaf
- * contains only checksum items, and it can be safely freed without reading
- * it.
- *
- * This is just an optimization targeted at large files.  It may do
- * nothing.  It will return 0 unless things went badly.
- */
-static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
-				     struct btrfs_path *path,
-				     struct inode *inode, u64 new_size)
-{
-	struct btrfs_key key;
-	int ret;
-	int nritems;
-	struct btrfs_key found_key;
-	struct btrfs_key other_key;
-	struct btrfs_leaf_ref *ref;
-	u64 leaf_gen;
-	u64 leaf_start;
-
-	path->lowest_level = 1;
-	key.objectid = inode->i_ino;
-	key.type = BTRFS_CSUM_ITEM_KEY;
-	key.offset = new_size;
-again:
-	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-	if (ret < 0)
-		goto out;
-
-	if (path->nodes[1] == NULL) {
-		ret = 0;
-		goto out;
-	}
-	ret = 0;
-	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
-	nritems = btrfs_header_nritems(path->nodes[1]);
-
-	if (!nritems)
-		goto out;
-
-	if (path->slots[1] >= nritems)
-		goto next_node;
-
-	/* did we find a key greater than anything we want to delete? */
-	if (found_key.objectid > inode->i_ino ||
-	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
-		goto out;
-
-	/* we check the next key in the node to make sure the leave contains
-	 * only checksum items.  This comparison doesn't work if our
-	 * leaf is the last one in the node
-	 */
-	if (path->slots[1] + 1 >= nritems) {
-next_node:
-		/* search forward from the last key in the node, this
-		 * will bring us into the next node in the tree
-		 */
-		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
-
-		/* unlikely, but we inc below, so check to be safe */
-		if (found_key.offset == (u64)-1)
-			goto out;
-
-		/* search_forward needs a path with locks held, do the
-		 * search again for the original key.  It is possible
-		 * this will race with a balance and return a path that
-		 * we could modify, but this drop is just an optimization
-		 * and is allowed to miss some leaves.
-		 */
-		btrfs_release_path(root, path);
-		found_key.offset++;
-
-		/* setup a max key for search_forward */
-		other_key.offset = (u64)-1;
-		other_key.type = key.type;
-		other_key.objectid = key.objectid;
-
-		path->keep_locks = 1;
-		ret = btrfs_search_forward(root, &found_key, &other_key,
-					   path, 0, 0);
-		path->keep_locks = 0;
-		if (ret || found_key.objectid != key.objectid ||
-		    found_key.type != key.type) {
-			ret = 0;
-			goto out;
-		}
-
-		key.offset = found_key.offset;
-		btrfs_release_path(root, path);
-		cond_resched();
-		goto again;
-	}
-
-	/* we know there's one more slot after us in the tree,
-	 * read that key so we can verify it is also a checksum item
-	 */
-	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
-
-	if (found_key.objectid < inode->i_ino)
-		goto next_key;
-
-	if (found_key.type != key.type || found_key.offset < new_size)
-		goto next_key;
-
-	/*
-	 * if the key for the next leaf isn't a csum key from this objectid,
-	 * we can't be sure there aren't good items inside this leaf.
-	 * Bail out
-	 */
-	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
-		goto out;
-
-	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
-	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
-	/*
-	 * it is safe to delete this leaf, it contains only
-	 * csum items from this inode at an offset >= new_size
-	 */
-	ret = btrfs_del_leaf(trans, root, path, leaf_start);
-	BUG_ON(ret);
-
-	if (root->ref_cows && leaf_gen < trans->transid) {
-		ref = btrfs_alloc_leaf_ref(root, 0);
-		if (ref) {
-			ref->root_gen = root->root_key.offset;
-			ref->bytenr = leaf_start;
-			ref->owner = 0;
-			ref->generation = leaf_gen;
-			ref->nritems = 0;
-
-			btrfs_sort_leaf_ref(ref);
-
-			ret = btrfs_add_leaf_ref(root, ref, 0);
-			WARN_ON(ret);
-			btrfs_free_leaf_ref(root, ref);
-		} else {
-			WARN_ON(1);
-		}
-	}
-next_key:
-	btrfs_release_path(root, path);
-
-	if (other_key.objectid == inode->i_ino &&
-	    other_key.type == key.type && other_key.offset > key.offset) {
-		key.offset = other_key.offset;
-		cond_resched();
-		goto again;
-	}
-	ret = 0;
-out:
-	/* fixup any changes we've made to the path */
-	path->lowest_level = 0;
-	path->keep_locks = 0;
-	btrfs_release_path(root, path);
-	return ret;
-}
-
-#endif
-
 /*
 /*
  * this can truncate away extent items, csum items and directory items.
  * this can truncate away extent items, csum items and directory items.
  * It starts at a high offset and removes keys until it can't find
  * It starts at a high offset and removes keys until it can't find
@@ -3510,7 +3337,7 @@ delete:
 				BUG_ON(ret);
 				BUG_ON(ret);
 				pending_del_nr = 0;
 				pending_del_nr = 0;
 			}
 			}
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto search_again;
 			goto search_again;
 		} else {
 		} else {
 			path->slots[0]--;
 			path->slots[0]--;
@@ -3668,7 +3495,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 	while (1) {
 	while (1) {
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 				block_end - cur_offset, 0);
 				block_end - cur_offset, 0);
-		BUG_ON(IS_ERR(em) || !em);
+		BUG_ON(IS_ERR_OR_NULL(em));
 		last_byte = min(extent_map_end(em), block_end);
 		last_byte = min(extent_map_end(em), block_end);
 		last_byte = (last_byte + mask) & ~mask;
 		last_byte = (last_byte + mask) & ~mask;
 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3878,7 +3705,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
 	if (IS_ERR(di))
 	if (IS_ERR(di))
 		ret = PTR_ERR(di);
 		ret = PTR_ERR(di);
 
 
-	if (!di || IS_ERR(di))
+	if (IS_ERR_OR_NULL(di))
 		goto out_err;
 		goto out_err;
 
 
 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
@@ -3936,7 +3763,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
 	if (ret)
 	if (ret)
 		goto out;
 		goto out;
 
 
-	btrfs_release_path(root->fs_info->tree_root, path);
+	btrfs_release_path(path);
 
 
 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
 	if (IS_ERR(new_root)) {
 	if (IS_ERR(new_root)) {
@@ -4479,24 +4306,20 @@ void btrfs_dirty_inode(struct inode *inode)
 		btrfs_end_transaction(trans, root);
 		btrfs_end_transaction(trans, root);
 		trans = btrfs_start_transaction(root, 1);
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
 		if (IS_ERR(trans)) {
-			if (printk_ratelimit()) {
-				printk(KERN_ERR "btrfs: fail to "
+			printk_ratelimited(KERN_ERR "btrfs: fail to "
 				       "dirty  inode %llu error %ld\n",
 				       "dirty  inode %llu error %ld\n",
 				       (unsigned long long)btrfs_ino(inode),
 				       (unsigned long long)btrfs_ino(inode),
 				       PTR_ERR(trans));
 				       PTR_ERR(trans));
-			}
 			return;
 			return;
 		}
 		}
 		btrfs_set_trans_block_group(trans, inode);
 		btrfs_set_trans_block_group(trans, inode);
 
 
 		ret = btrfs_update_inode(trans, root, inode);
 		ret = btrfs_update_inode(trans, root, inode);
 		if (ret) {
 		if (ret) {
-			if (printk_ratelimit()) {
-				printk(KERN_ERR "btrfs: fail to "
+			printk_ratelimited(KERN_ERR "btrfs: fail to "
 				       "dirty  inode %llu error %d\n",
 				       "dirty  inode %llu error %d\n",
 				       (unsigned long long)btrfs_ino(inode),
 				       (unsigned long long)btrfs_ino(inode),
 				       ret);
 				       ret);
-			}
 		}
 		}
 	}
 	}
 	btrfs_end_transaction(trans, root);
 	btrfs_end_transaction(trans, root);
@@ -5146,7 +4969,7 @@ again:
 		else
 		else
 			goto out;
 			goto out;
 	}
 	}
-	em = alloc_extent_map(GFP_NOFS);
+	em = alloc_extent_map();
 	if (!em) {
 	if (!em) {
 		err = -ENOMEM;
 		err = -ENOMEM;
 		goto out;
 		goto out;
@@ -5300,7 +5123,7 @@ again:
 				kunmap(page);
 				kunmap(page);
 				free_extent_map(em);
 				free_extent_map(em);
 				em = NULL;
 				em = NULL;
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 				trans = btrfs_join_transaction(root, 1);
 				trans = btrfs_join_transaction(root, 1);
 				if (IS_ERR(trans))
 				if (IS_ERR(trans))
 					return ERR_CAST(trans);
 					return ERR_CAST(trans);
@@ -5326,7 +5149,7 @@ not_found_em:
 	em->block_start = EXTENT_MAP_HOLE;
 	em->block_start = EXTENT_MAP_HOLE;
 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
 insert:
 insert:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	if (em->start > start || extent_map_end(em) <= start) {
 	if (em->start > start || extent_map_end(em) <= start) {
 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
 		       "[%llu %llu]\n", (unsigned long long)em->start,
 		       "[%llu %llu]\n", (unsigned long long)em->start,
@@ -5459,7 +5282,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
 		u64 hole_start = start;
 		u64 hole_start = start;
 		u64 hole_len = len;
 		u64 hole_len = len;
 
 
-		em = alloc_extent_map(GFP_NOFS);
+		em = alloc_extent_map();
 		if (!em) {
 		if (!em) {
 			err = -ENOMEM;
 			err = -ENOMEM;
 			goto out;
 			goto out;
@@ -5560,7 +5383,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 	}
 	}
 
 
 	if (!em) {
 	if (!em) {
-		em = alloc_extent_map(GFP_NOFS);
+		em = alloc_extent_map();
 		if (!em) {
 		if (!em) {
 			em = ERR_PTR(-ENOMEM);
 			em = ERR_PTR(-ENOMEM);
 			goto out;
 			goto out;
@@ -6865,9 +6688,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->delayed_node = NULL;
 	ei->delayed_node = NULL;
 
 
 	inode = &ei->vfs_inode;
 	inode = &ei->vfs_inode;
-	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
-	extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
-	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
+	extent_map_tree_init(&ei->extent_tree);
+	extent_io_tree_init(&ei->io_tree, &inode->i_data);
+	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
 	mutex_init(&ei->log_mutex);
 	mutex_init(&ei->log_mutex);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
 	INIT_LIST_HEAD(&ei->i_orphan);
 	INIT_LIST_HEAD(&ei->i_orphan);
@@ -7265,58 +7088,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 	return 0;
 	return 0;
 }
 }
 
 
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
-				   int sync)
-{
-	struct btrfs_inode *binode;
-	struct inode *inode = NULL;
-
-	spin_lock(&root->fs_info->delalloc_lock);
-	while (!list_empty(&root->fs_info->delalloc_inodes)) {
-		binode = list_entry(root->fs_info->delalloc_inodes.next,
-				    struct btrfs_inode, delalloc_inodes);
-		inode = igrab(&binode->vfs_inode);
-		if (inode) {
-			list_move_tail(&binode->delalloc_inodes,
-				       &root->fs_info->delalloc_inodes);
-			break;
-		}
-
-		list_del_init(&binode->delalloc_inodes);
-		cond_resched_lock(&root->fs_info->delalloc_lock);
-	}
-	spin_unlock(&root->fs_info->delalloc_lock);
-
-	if (inode) {
-		if (sync) {
-			filemap_write_and_wait(inode->i_mapping);
-			/*
-			 * We have to do this because compression doesn't
-			 * actually set PG_writeback until it submits the pages
-			 * for IO, which happens in an async thread, so we could
-			 * race and not actually wait for any writeback pages
-			 * because they've not been submitted yet.  Technically
-			 * this could still be the case for the ordered stuff
-			 * since the async thread may not have started to do its
-			 * work yet.  If this becomes the case then we need to
-			 * figure out a way to make sure that in writepage we
-			 * wait for any async pages to be submitted before
-			 * returning so that fdatawait does what its supposed to
-			 * do.
-			 */
-			btrfs_wait_ordered_range(inode, 0, (u64)-1);
-		} else {
-			filemap_flush(inode->i_mapping);
-		}
-		if (delay_iput)
-			btrfs_add_delayed_iput(inode);
-		else
-			iput(inode);
-		return 1;
-	}
-	return 0;
-}
-
 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 			 const char *symname)
 			 const char *symname)
 {
 {
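On the logging conversions in the inode.c hunks above: with <linux/ratelimit.h> now included at the top of the file, printk_ratelimited() replaces the open-coded "if (printk_ratelimit())" guard around printk(). A hedged sketch of the pattern with a shortened message; the helper name and its arguments are illustrative only:

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void report_csum_failure(u64 ino, u64 off, u32 csum)
	{
		/* The macro drops messages on its own when they arrive too fast. */
		printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u\n",
				   (unsigned long long)ino, (unsigned long long)off, csum);
	}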

+ 6 - 6
fs/btrfs/ioctl.c

@@ -1402,7 +1402,7 @@ static noinline int search_ioctl(struct inode *inode,
 		}
 		}
 		ret = copy_to_sk(root, path, &key, sk, args->buf,
 		ret = copy_to_sk(root, path, &key, sk, args->buf,
 				 &sk_offset, &num_found);
 				 &sk_offset, &num_found);
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		if (ret || num_found >= sk->nr_items)
 		if (ret || num_found >= sk->nr_items)
 			break;
 			break;
 
 
@@ -1509,7 +1509,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
 		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
 		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
 			break;
 			break;
 
 
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		key.objectid = key.offset;
 		key.objectid = key.offset;
 		key.offset = (u64)-1;
 		key.offset = (u64)-1;
 		dirid = key.objectid;
 		dirid = key.objectid;
@@ -1988,7 +1988,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 				datal = btrfs_file_extent_ram_bytes(leaf,
 				datal = btrfs_file_extent_ram_bytes(leaf,
 								    extent);
 								    extent);
 			}
 			}
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 
 
 			if (key.offset + datal <= off ||
 			if (key.offset + datal <= off ||
 			    key.offset >= off+len)
 			    key.offset >= off+len)
@@ -2098,7 +2098,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 			}
 			}
 
 
 			btrfs_mark_buffer_dirty(leaf);
 			btrfs_mark_buffer_dirty(leaf);
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 
 
 			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
 
@@ -2119,12 +2119,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 			btrfs_end_transaction(trans, root);
 			btrfs_end_transaction(trans, root);
 		}
 		}
 next:
 next:
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		key.offset++;
 		key.offset++;
 	}
 	}
 	ret = 0;
 	ret = 0;
 out:
 out:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
 	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
 out_unlock:
 out_unlock:
 	mutex_unlock(&src->i_mutex);
 	mutex_unlock(&src->i_mutex);
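Every hunk in this file is the same mechanical conversion that runs through the rest of the merge: btrfs_release_path() no longer takes a struct btrfs_root argument, only the path. A minimal sketch of a caller under the new signature (the wrapper function itself is hypothetical, not part of this commit):

#include "ctree.h"

static int lookup_and_release(struct btrfs_root *root, struct btrfs_key *key)
{
	struct btrfs_path *path = btrfs_alloc_path();
	int ret;

	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* old call was btrfs_release_path(root, path); the root is gone now */
	btrfs_release_path(path);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}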

+ 0 - 25
fs/btrfs/locking.c

@@ -185,31 +185,6 @@ sleep:
 	return 0;
 	return 0;
 }
 }
 
 
-/*
- * Very quick trylock, this does not spin or schedule.  It returns
- * 1 with the spinlock held if it was able to take the lock, or it
- * returns zero if it was unable to take the lock.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
- */
-int btrfs_try_tree_lock(struct extent_buffer *eb)
-{
-	if (spin_trylock(&eb->lock)) {
-		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-			/*
-			 * we've got the spinlock, but the real owner is
-			 * blocking.  Drop the spinlock and return failure
-			 */
-			spin_unlock(&eb->lock);
-			return 0;
-		}
-		return 1;
-	}
-	/* someone else has the spinlock giveup */
-	return 0;
-}
-
 int btrfs_tree_unlock(struct extent_buffer *eb)
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
 {
 	/*
 	/*
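btrfs_try_tree_lock() goes away along with its comment; btrfs_try_spin_lock() remains the non-blocking entry point (see locking.h below). For reference, the trylock idiom the removed helper implemented, sketched with generic names rather than the real extent_buffer fields:

#include <linux/spinlock.h>
#include <linux/bitops.h>

/* Returns 1 with the spinlock held, 0 if the lock (or its blocking owner)
 * made the fast path impossible.  Purely illustrative. */
static int try_lock_fast(spinlock_t *lock, unsigned long *state, int blocking_bit)
{
	if (!spin_trylock(lock))
		return 0;		/* someone else holds the spinlock */
	if (test_bit(blocking_bit, state)) {
		spin_unlock(lock);	/* real owner is blocking, back off */
		return 0;
	}
	return 1;
}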

+ 0 - 2
fs/btrfs/locking.h

@@ -21,8 +21,6 @@
 
 
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
-
-int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
 
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
 void btrfs_set_lock_blocking(struct extent_buffer *eb);

+ 0 - 164
fs/btrfs/ref-cache.c

@@ -23,56 +23,6 @@
 #include "ref-cache.h"
 #include "ref-cache.h"
 #include "transaction.h"
 #include "transaction.h"
 
 
-/*
- * leaf refs are used to cache the information about which extents
- * a given leaf has references on.  This allows us to process that leaf
- * in btrfs_drop_snapshot without needing to read it back from disk.
- */
-
-/*
- * kmalloc a leaf reference struct and update the counters for the
- * total ref cache size
- */
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
-					    int nr_extents)
-{
-	struct btrfs_leaf_ref *ref;
-	size_t size = btrfs_leaf_ref_size(nr_extents);
-
-	ref = kmalloc(size, GFP_NOFS);
-	if (ref) {
-		spin_lock(&root->fs_info->ref_cache_lock);
-		root->fs_info->total_ref_cache_size += size;
-		spin_unlock(&root->fs_info->ref_cache_lock);
-
-		memset(ref, 0, sizeof(*ref));
-		atomic_set(&ref->usage, 1);
-		INIT_LIST_HEAD(&ref->list);
-	}
-	return ref;
-}
-
-/*
- * free a leaf reference struct and update the counters for the
- * total ref cache size
- */
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
-	if (!ref)
-		return;
-	WARN_ON(atomic_read(&ref->usage) == 0);
-	if (atomic_dec_and_test(&ref->usage)) {
-		size_t size = btrfs_leaf_ref_size(ref->nritems);
-
-		BUG_ON(ref->in_tree);
-		kfree(ref);
-
-		spin_lock(&root->fs_info->ref_cache_lock);
-		root->fs_info->total_ref_cache_size -= size;
-		spin_unlock(&root->fs_info->ref_cache_lock);
-	}
-}
-
 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 				   struct rb_node *node)
 				   struct rb_node *node)
 {
 {
@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 	}
 	}
 	return NULL;
 	return NULL;
 }
 }
-
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
-			   int shared)
-{
-	struct btrfs_leaf_ref *ref = NULL;
-	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
-	if (shared)
-		tree = &root->fs_info->shared_ref_tree;
-	if (!tree)
-		return 0;
-
-	spin_lock(&tree->lock);
-	while (!list_empty(&tree->list)) {
-		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
-		BUG_ON(ref->tree != tree);
-		if (ref->root_gen > max_root_gen)
-			break;
-		if (!xchg(&ref->in_tree, 0)) {
-			cond_resched_lock(&tree->lock);
-			continue;
-		}
-
-		rb_erase(&ref->rb_node, &tree->root);
-		list_del_init(&ref->list);
-
-		spin_unlock(&tree->lock);
-		btrfs_free_leaf_ref(root, ref);
-		cond_resched();
-		spin_lock(&tree->lock);
-	}
-	spin_unlock(&tree->lock);
-	return 0;
-}
-
-/*
- * find the leaf ref for a given extent.  This returns the ref struct with
- * a usage reference incremented
- */
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-					     u64 bytenr)
-{
-	struct rb_node *rb;
-	struct btrfs_leaf_ref *ref = NULL;
-	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-again:
-	if (tree) {
-		spin_lock(&tree->lock);
-		rb = tree_search(&tree->root, bytenr);
-		if (rb)
-			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
-		if (ref)
-			atomic_inc(&ref->usage);
-		spin_unlock(&tree->lock);
-		if (ref)
-			return ref;
-	}
-	if (tree != &root->fs_info->shared_ref_tree) {
-		tree = &root->fs_info->shared_ref_tree;
-		goto again;
-	}
-	return NULL;
-}
-
-/*
- * add a fully filled in leaf ref struct
- * remove all the refs older than a given root generation
- */
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
-		       int shared)
-{
-	int ret = 0;
-	struct rb_node *rb;
-	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
-	if (shared)
-		tree = &root->fs_info->shared_ref_tree;
-
-	spin_lock(&tree->lock);
-	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
-	if (rb) {
-		ret = -EEXIST;
-	} else {
-		atomic_inc(&ref->usage);
-		ref->tree = tree;
-		ref->in_tree = 1;
-		list_add_tail(&ref->list, &tree->list);
-	}
-	spin_unlock(&tree->lock);
-	return ret;
-}
-
-/*
- * remove a single leaf ref from the tree.  This drops the ref held by the tree
- * only
- */
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
-	struct btrfs_leaf_ref_tree *tree;
-
-	if (!xchg(&ref->in_tree, 0))
-		return 0;
-
-	tree = ref->tree;
-	spin_lock(&tree->lock);
-
-	rb_erase(&ref->rb_node, &tree->root);
-	list_del_init(&ref->list);
-
-	spin_unlock(&tree->lock);
-
-	btrfs_free_leaf_ref(root, ref);
-	return 0;
-}

+ 0 - 24
fs/btrfs/ref-cache.h

@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
 	return sizeof(struct btrfs_leaf_ref) +
 	return sizeof(struct btrfs_leaf_ref) +
 	       sizeof(struct btrfs_extent_info) * nr_extents;
 	       sizeof(struct btrfs_extent_info) * nr_extents;
 }
 }
-
-static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
-{
-	tree->root = RB_ROOT;
-	INIT_LIST_HEAD(&tree->list);
-	spin_lock_init(&tree->lock);
-}
-
-static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
-{
-	return RB_EMPTY_ROOT(&tree->root);
-}
-
-void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
-					    int nr_extents);
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-					     u64 bytenr);
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
-		       int shared);
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
-			   int shared);
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
 #endif
 #endif
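Only btrfs_leaf_ref_size() survives in this header; it sizes a leaf ref as a fixed header followed by nr_extents extent-info records. The same header-plus-trailing-array allocation, shown as a self-contained sketch with stand-in struct names (the real definitions stay in ref-cache.h):

#include <linux/types.h>
#include <linux/slab.h>

struct demo_extent_info {
	u64 bytenr;
	u64 num_bytes;
};

struct demo_leaf_ref {
	int nritems;				/* records that follow */
	struct demo_extent_info extents[];	/* flexible array member */
};

/* same sizing rule as btrfs_leaf_ref_size(): header + nr_extents records */
static struct demo_leaf_ref *demo_alloc_leaf_ref(int nr_extents)
{
	size_t size = sizeof(struct demo_leaf_ref) +
		      sizeof(struct demo_extent_info) * nr_extents;
	struct demo_leaf_ref *ref = kzalloc(size, GFP_NOFS);

	if (ref)
		ref->nritems = nr_extents;
	return ref;
}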

+ 19 - 19
fs/btrfs/relocation.c

@@ -508,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
 	return 1;
 	return 1;
 }
 }
 
 
+
 static int should_ignore_root(struct btrfs_root *root)
 static int should_ignore_root(struct btrfs_root *root)
 {
 {
 	struct btrfs_root *reloc_root;
 	struct btrfs_root *reloc_root;
@@ -530,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root)
 	 */
 	 */
 	return 1;
 	return 1;
 }
 }
-
 /*
 /*
  * find reloc tree by address of tree root
  * find reloc tree by address of tree root
  */
  */
@@ -962,7 +962,7 @@ again:
 			lower = upper;
 			lower = upper;
 			upper = NULL;
 			upper = NULL;
 		}
 		}
-		btrfs_release_path(root, path2);
+		btrfs_release_path(path2);
 next:
 next:
 		if (ptr < end) {
 		if (ptr < end) {
 			ptr += btrfs_extent_inline_ref_size(key.type);
 			ptr += btrfs_extent_inline_ref_size(key.type);
@@ -975,7 +975,7 @@ next:
 		if (ptr >= end)
 		if (ptr >= end)
 			path1->slots[0]++;
 			path1->slots[0]++;
 	}
 	}
-	btrfs_release_path(rc->extent_root, path1);
+	btrfs_release_path(path1);
 
 
 	cur->checked = 1;
 	cur->checked = 1;
 	WARN_ON(exist);
 	WARN_ON(exist);
@@ -1750,7 +1750,7 @@ again:
 
 
 		btrfs_node_key_to_cpu(path->nodes[level], &key,
 		btrfs_node_key_to_cpu(path->nodes[level], &key,
 				      path->slots[level]);
 				      path->slots[level]);
-		btrfs_release_path(src, path);
+		btrfs_release_path(path);
 
 
 		path->lowest_level = level;
 		path->lowest_level = level;
 		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
 		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
@@ -2499,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 			path->locks[upper->level] = 0;
 			path->locks[upper->level] = 0;
 
 
 			slot = path->slots[upper->level];
 			slot = path->slots[upper->level];
-			btrfs_release_path(NULL, path);
+			btrfs_release_path(path);
 		} else {
 		} else {
 			ret = btrfs_bin_search(upper->eb, key, upper->level,
 			ret = btrfs_bin_search(upper->eb, key, upper->level,
 					       &slot);
 					       &slot);
@@ -2740,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
 		} else {
 		} else {
 			path->lowest_level = node->level;
 			path->lowest_level = node->level;
 			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
 			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			if (ret > 0)
 			if (ret > 0)
 				ret = 0;
 				ret = 0;
 		}
 		}
@@ -2873,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 	struct extent_map *em;
 	struct extent_map *em;
 	int ret = 0;
 	int ret = 0;
 
 
-	em = alloc_extent_map(GFP_NOFS);
+	em = alloc_extent_map();
 	if (!em)
 	if (!em)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
@@ -3122,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc,
 #endif
 #endif
 	}
 	}
 
 
-	btrfs_release_path(rc->extent_root, path);
+	btrfs_release_path(path);
 
 
 	BUG_ON(level == -1);
 	BUG_ON(level == -1);
 
 
@@ -3223,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 	key.offset = 0;
 	key.offset = 0;
 
 
 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
-	if (!inode || IS_ERR(inode) || is_bad_inode(inode)) {
+	if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
 		if (inode && !IS_ERR(inode))
 		if (inode && !IS_ERR(inode))
 			iput(inode);
 			iput(inode);
 		return -ENOENT;
 		return -ENOENT;
@@ -3508,7 +3508,7 @@ int add_data_references(struct reloc_control *rc,
 		}
 		}
 		path->slots[0]++;
 		path->slots[0]++;
 	}
 	}
-	btrfs_release_path(rc->extent_root, path);
+	btrfs_release_path(path);
 	if (err)
 	if (err)
 		free_block_list(blocks);
 		free_block_list(blocks);
 	return err;
 	return err;
@@ -3571,7 +3571,7 @@ next:
 					    EXTENT_DIRTY);
 					    EXTENT_DIRTY);
 
 
 		if (ret == 0 && start <= key.objectid) {
 		if (ret == 0 && start <= key.objectid) {
-			btrfs_release_path(rc->extent_root, path);
+			btrfs_release_path(path);
 			rc->search_start = end + 1;
 			rc->search_start = end + 1;
 		} else {
 		} else {
 			rc->search_start = key.objectid + key.offset;
 			rc->search_start = key.objectid + key.offset;
@@ -3579,7 +3579,7 @@ next:
 			return 0;
 			return 0;
 		}
 		}
 	}
 	}
-	btrfs_release_path(rc->extent_root, path);
+	btrfs_release_path(path);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -3716,7 +3716,7 @@ restart:
 				flags = BTRFS_EXTENT_FLAG_DATA;
 				flags = BTRFS_EXTENT_FLAG_DATA;
 
 
 			if (path_change) {
 			if (path_change) {
-				btrfs_release_path(rc->extent_root, path);
+				btrfs_release_path(path);
 
 
 				path->search_commit_root = 1;
 				path->search_commit_root = 1;
 				path->skip_locking = 1;
 				path->skip_locking = 1;
@@ -3739,7 +3739,7 @@ restart:
 			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
 			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
 			ret = add_data_references(rc, &key, path, &blocks);
 			ret = add_data_references(rc, &key, path, &blocks);
 		} else {
 		} else {
-			btrfs_release_path(rc->extent_root, path);
+			btrfs_release_path(path);
 			ret = 0;
 			ret = 0;
 		}
 		}
 		if (ret < 0) {
 		if (ret < 0) {
@@ -3802,7 +3802,7 @@ restart:
 		}
 		}
 	}
 	}
 
 
-	btrfs_release_path(rc->extent_root, path);
+	btrfs_release_path(path);
 	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
 	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
 			  GFP_NOFS);
 			  GFP_NOFS);
 
 
@@ -3870,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
 	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
 	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
 					  BTRFS_INODE_PREALLOC);
 					  BTRFS_INODE_PREALLOC);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 out:
 out:
 	btrfs_free_path(path);
 	btrfs_free_path(path);
 	return ret;
 	return ret;
@@ -3938,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void)
 	INIT_LIST_HEAD(&rc->reloc_roots);
 	INIT_LIST_HEAD(&rc->reloc_roots);
 	backref_cache_init(&rc->backref_cache);
 	backref_cache_init(&rc->backref_cache);
 	mapping_tree_init(&rc->reloc_root_tree);
 	mapping_tree_init(&rc->reloc_root_tree);
-	extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
+	extent_io_tree_init(&rc->processed_blocks, NULL);
 	return rc;
 	return rc;
 }
 }
 
 
@@ -4112,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 		}
 		}
 		leaf = path->nodes[0];
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		btrfs_release_path(root->fs_info->tree_root, path);
+		btrfs_release_path(path);
 
 
 		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
 		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
 		    key.type != BTRFS_ROOT_ITEM_KEY)
 		    key.type != BTRFS_ROOT_ITEM_KEY)
@@ -4144,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 
 
 		key.offset--;
 		key.offset--;
 	}
 	}
-	btrfs_release_path(root->fs_info->tree_root, path);
+	btrfs_release_path(path);
 
 
 	if (list_empty(&reloc_roots))
 	if (list_empty(&reloc_roots))
 		goto out;
 		goto out;
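Two smaller cleanups ride along with the btrfs_release_path() conversion here: alloc_extent_map() and extent_io_tree_init() drop their gfp_t arguments, and the open-coded "!inode || IS_ERR(inode)" test in delete_block_group_cache() becomes IS_ERR_OR_NULL() from <linux/err.h>. The latter is a straight equivalence; a minimal illustration (helper name hypothetical):

#include <linux/err.h>
#include <linux/fs.h>

static int inode_usable(struct inode *inode)
{
	/* true for both NULL and ERR_PTR() values, i.e. "!p || IS_ERR(p)" */
	if (IS_ERR_OR_NULL(inode))
		return 0;
	return 1;
}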

+ 4 - 51
fs/btrfs/root-tree.c

@@ -21,53 +21,6 @@
 #include "disk-io.h"
 #include "disk-io.h"
 #include "print-tree.h"
 #include "print-tree.h"
 
 
-/*
- *  search forward for a root, starting with objectid 'search_start'
- *  if a root key is found, the objectid we find is filled into 'found_objectid'
- *  and 0 is returned.  < 0 is returned on error, 1 if there is nothing
- *  left in the tree.
- */
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
-		      u64 *found_objectid)
-{
-	struct btrfs_path *path;
-	struct btrfs_key search_key;
-	int ret;
-
-	root = root->fs_info->tree_root;
-	search_key.objectid = search_start;
-	search_key.type = (u8)-1;
-	search_key.offset = (u64)-1;
-
-	path = btrfs_alloc_path();
-	BUG_ON(!path);
-again:
-	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
-	if (ret < 0)
-		goto out;
-	if (ret == 0) {
-		ret = 1;
-		goto out;
-	}
-	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
-		ret = btrfs_next_leaf(root, path);
-		if (ret)
-			goto out;
-	}
-	btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
-	if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
-		search_key.offset++;
-		btrfs_release_path(root, path);
-		goto again;
-	}
-	ret = 0;
-	*found_objectid = search_key.objectid;
-
-out:
-	btrfs_free_path(path);
-	return ret;
-}
-
 /*
 /*
  * lookup the root with the highest offset for a given objectid.  The key we do
  * lookup the root with the highest offset for a given objectid.  The key we do
  * find is copied into 'key'.  If we find something return 0, otherwise 1, < 0
  * find is copied into 'key'.  If we find something return 0, otherwise 1, < 0
@@ -230,7 +183,7 @@ again:
 
 
 		memcpy(&found_key, &key, sizeof(key));
 		memcpy(&found_key, &key, sizeof(key));
 		key.offset++;
 		key.offset++;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		dead_root =
 		dead_root =
 			btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
 			btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
 						    &found_key);
 						    &found_key);
@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 		}
 		}
 
 
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		btrfs_release_path(tree_root, path);
+		btrfs_release_path(path);
 
 
 		if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
 		if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
 		    key.type != BTRFS_ORPHAN_ITEM_KEY)
 		    key.type != BTRFS_ORPHAN_ITEM_KEY)
@@ -390,7 +343,7 @@ again:
 		err = -ENOENT;
 		err = -ENOENT;
 
 
 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
-		btrfs_release_path(tree_root, path);
+		btrfs_release_path(path);
 		key.objectid = ref_id;
 		key.objectid = ref_id;
 		key.type = BTRFS_ROOT_REF_KEY;
 		key.type = BTRFS_ROOT_REF_KEY;
 		key.offset = root_id;
 		key.offset = root_id;
@@ -463,7 +416,7 @@ again:
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_mark_buffer_dirty(leaf);
 
 
 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
-		btrfs_release_path(tree_root, path);
+		btrfs_release_path(path);
 		key.objectid = ref_id;
 		key.objectid = ref_id;
 		key.type = BTRFS_ROOT_REF_KEY;
 		key.type = BTRFS_ROOT_REF_KEY;
 		key.offset = root_id;
 		key.offset = root_id;

+ 2 - 2
fs/btrfs/super.c

@@ -740,7 +740,7 @@ static int btrfs_set_super(struct super_block *s, void *data)
  *	  for multiple device setup.  Make sure to keep it in sync.
  *	  for multiple device setup.  Make sure to keep it in sync.
  */
  */
 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
-		const char *dev_name, void *data)
+		const char *device_name, void *data)
 {
 {
 	struct block_device *bdev = NULL;
 	struct block_device *bdev = NULL;
 	struct super_block *s;
 	struct super_block *s;
@@ -763,7 +763,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	if (error)
 	if (error)
 		return ERR_PTR(error);
 		return ERR_PTR(error);
 
 
-	error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
+	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
 	if (error)
 	if (error)
 		goto error_free_subvol_name;
 		goto error_free_subvol_name;
 
 

+ 0 - 77
fs/btrfs/sysfs.c

@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
 	.store	= btrfs_root_attr_store,
 	.store	= btrfs_root_attr_store,
 };
 };
 
 
-static struct kobj_type btrfs_root_ktype = {
-	.default_attrs	= btrfs_root_attrs,
-	.sysfs_ops	= &btrfs_root_attr_ops,
-	.release	= btrfs_root_release,
-};
-
-static struct kobj_type btrfs_super_ktype = {
-	.default_attrs	= btrfs_super_attrs,
-	.sysfs_ops	= &btrfs_super_attr_ops,
-	.release	= btrfs_super_release,
-};
-
 /* /sys/fs/btrfs/ entry */
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
 static struct kset *btrfs_kset;
 
 
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
-{
-	int error;
-	char *name;
-	char c;
-	int len = strlen(fs->sb->s_id) + 1;
-	int i;
-
-	name = kmalloc(len, GFP_NOFS);
-	if (!name) {
-		error = -ENOMEM;
-		goto fail;
-	}
-
-	for (i = 0; i < len; i++) {
-		c = fs->sb->s_id[i];
-		if (c == '/' || c == '\\')
-			c = '!';
-		name[i] = c;
-	}
-	name[len] = '\0';
-
-	fs->super_kobj.kset = btrfs_kset;
-	error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
-				     NULL, "%s", name);
-	kfree(name);
-	if (error)
-		goto fail;
-
-	return 0;
-
-fail:
-	printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
-	return error;
-}
-
-int btrfs_sysfs_add_root(struct btrfs_root *root)
-{
-	int error;
-
-	error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
-				     &root->fs_info->super_kobj,
-				     "%s", root->name);
-	if (error)
-		goto fail;
-
-	return 0;
-
-fail:
-	printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
-	return error;
-}
-
-void btrfs_sysfs_del_root(struct btrfs_root *root)
-{
-	kobject_put(&root->root_kobj);
-	wait_for_completion(&root->kobj_unregister);
-}
-
-void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
-{
-	kobject_put(&fs->super_kobj);
-	wait_for_completion(&fs->kobj_unregister);
-}
-
 int btrfs_init_sysfs(void)
 int btrfs_init_sysfs(void)
 {
 {
 	btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
 	btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
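Only the /sys/fs/btrfs kset is left; the per-superblock and per-root kobject helpers are gone. For reference, the register/teardown pairing those helpers followed, where kobject_put() must be matched by waiting for the ktype release, sketched with hypothetical names:

#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct demo_obj {
	struct kobject kobj;		/* assumed zero-initialized by the caller */
	struct completion done;
};

static void demo_release(struct kobject *kobj)
{
	struct demo_obj *d = container_of(kobj, struct demo_obj, kobj);

	complete(&d->done);		/* unblocks demo_del() below */
}

static struct kobj_type demo_ktype = {
	.release	= demo_release,
};

static int demo_add(struct demo_obj *d, struct kset *kset)
{
	init_completion(&d->done);
	d->kobj.kset = kset;
	return kobject_init_and_add(&d->kobj, &demo_ktype, NULL, "%s", "demo");
}

static void demo_del(struct demo_obj *d)
{
	kobject_put(&d->kobj);
	wait_for_completion(&d->done);	/* release has run, safe to free d */
}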

+ 1 - 136
fs/btrfs/transaction.c

@@ -81,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
 		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
 		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
 		extent_io_tree_init(&cur_trans->dirty_pages,
 		extent_io_tree_init(&cur_trans->dirty_pages,
-				     root->fs_info->btree_inode->i_mapping,
-				     GFP_NOFS);
+				     root->fs_info->btree_inode->i_mapping);
 		spin_lock(&root->fs_info->new_trans_lock);
 		spin_lock(&root->fs_info->new_trans_lock);
 		root->fs_info->running_transaction = cur_trans;
 		root->fs_info->running_transaction = cur_trans;
 		spin_unlock(&root->fs_info->new_trans_lock);
 		spin_unlock(&root->fs_info->new_trans_lock);
@@ -348,49 +347,6 @@ out_unlock:
 	return ret;
 	return ret;
 }
 }
 
 
-#if 0
-/*
- * rate limit against the drop_snapshot code.  This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
-static void throttle_on_drops(struct btrfs_root *root)
-{
-	struct btrfs_fs_info *info = root->fs_info;
-	int harder_count = 0;
-
-harder:
-	if (atomic_read(&info->throttles)) {
-		DEFINE_WAIT(wait);
-		int thr;
-		thr = atomic_read(&info->throttle_gen);
-
-		do {
-			prepare_to_wait(&info->transaction_throttle,
-					&wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&info->throttles)) {
-				finish_wait(&info->transaction_throttle, &wait);
-				break;
-			}
-			schedule();
-			finish_wait(&info->transaction_throttle, &wait);
-		} while (thr == atomic_read(&info->throttle_gen));
-		harder_count++;
-
-		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
-		    harder_count < 2)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
-		    harder_count < 10)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
-		    harder_count < 20)
-			goto harder;
-	}
-}
-#endif
-
 void btrfs_throttle(struct btrfs_root *root)
 void btrfs_throttle(struct btrfs_root *root)
 {
 {
 	mutex_lock(&root->fs_info->trans_mutex);
 	mutex_lock(&root->fs_info->trans_mutex);
@@ -837,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 	return ret;
 	return ret;
 }
 }
 
 
-#if 0
-/*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
-	DEFINE_WAIT(wait);
-
-	mutex_lock(&info->trans_mutex);
-	while (info->running_transaction &&
-	       info->running_transaction->delayed_refs.flushing) {
-		prepare_to_wait(&info->transaction_wait, &wait,
-				TASK_UNINTERRUPTIBLE);
-		mutex_unlock(&info->trans_mutex);
-
-		schedule();
-
-		mutex_lock(&info->trans_mutex);
-		finish_wait(&info->transaction_wait, &wait);
-	}
-	mutex_unlock(&info->trans_mutex);
-	return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
-	struct btrfs_trans_handle *trans;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
-	unsigned long nr;
-	int ret;
-
-	while (1) {
-		/*
-		 * we don't want to jump in and create a bunch of
-		 * delayed refs if the transaction is starting to close
-		 */
-		wait_transaction_pre_flush(tree_root->fs_info);
-		trans = btrfs_start_transaction(tree_root, 1);
-
-		/*
-		 * we've joined a transaction, make sure it isn't
-		 * closing right now
-		 */
-		if (trans->transaction->delayed_refs.flushing) {
-			btrfs_end_transaction(trans, tree_root);
-			continue;
-		}
-
-		ret = btrfs_drop_snapshot(trans, root);
-		if (ret != -EAGAIN)
-			break;
-
-		ret = btrfs_update_root(trans, tree_root,
-					&root->root_key,
-					&root->root_item);
-		if (ret)
-			break;
-
-		nr = trans->blocks_used;
-		ret = btrfs_end_transaction(trans, tree_root);
-		BUG_ON(ret);
-
-		btrfs_btree_balance_dirty(tree_root, nr);
-		cond_resched();
-	}
-	BUG_ON(ret);
-
-	ret = btrfs_del_root(trans, tree_root, &root->root_key);
-	BUG_ON(ret);
-
-	nr = trans->blocks_used;
-	ret = btrfs_end_transaction(trans, tree_root);
-	BUG_ON(ret);
-
-	free_extent_buffer(root->node);
-	free_extent_buffer(root->commit_root);
-	kfree(root);
-
-	btrfs_btree_balance_dirty(tree_root, nr);
-	return ret;
-}
-#endif
-
 /*
 /*
  * new snapshots need to be created at a very specific time in the
  * new snapshots need to be created at a very specific time in the
  * transaction commit.  This does the actual creation
  * transaction commit.  This does the actual creation
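Besides deleting the two #if 0 blocks, the join_transaction() hunk shows the other API change that repeats across this merge: extent_io_tree_init(), extent_map_tree_init() and alloc_extent_map() no longer take a gfp_t mask. A sketch of the new-style calls (the wrapper function and its parameters are hypothetical):

#include <linux/fs.h>
#include "extent_io.h"
#include "extent_map.h"

static int init_trees(struct extent_io_tree *io_tree,
		      struct extent_map_tree *em_tree,
		      struct address_space *mapping)
{
	struct extent_map *em;

	/* the GFP_NOFS arguments are gone from both init helpers */
	extent_io_tree_init(io_tree, mapping);
	extent_map_tree_init(em_tree);

	/* alloc_extent_map() likewise lost its gfp argument */
	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	free_extent_map(em);
	return 0;
}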

+ 0 - 3
fs/btrfs/transaction.h

@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root);
 				     struct btrfs_root *root);
-int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root);
 
 
 int btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_drop_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
 int btrfs_clean_old_snapshots(struct btrfs_root *root);
 int btrfs_clean_old_snapshots(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,

+ 1 - 1
fs/btrfs/tree-defrag.c

@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		ret = 0;
 		ret = 0;
 		goto out;
 		goto out;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 
 
 	if (wret < 0) {
 	if (wret < 0) {

+ 51 - 51
fs/btrfs/tree-log.c

@@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 			goto insert;
 			goto insert;
 
 
 		if (item_size == 0) {
 		if (item_size == 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			return 0;
 			return 0;
 		}
 		}
 		dst_copy = kmalloc(item_size, GFP_NOFS);
 		dst_copy = kmalloc(item_size, GFP_NOFS);
 		src_copy = kmalloc(item_size, GFP_NOFS);
 		src_copy = kmalloc(item_size, GFP_NOFS);
 		if (!dst_copy || !src_copy) {
 		if (!dst_copy || !src_copy) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			kfree(dst_copy);
 			kfree(dst_copy);
 			kfree(src_copy);
 			kfree(src_copy);
 			return -ENOMEM;
 			return -ENOMEM;
@@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 		 * sync
 		 * sync
 		 */
 		 */
 		if (ret == 0) {
 		if (ret == 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			return 0;
 			return 0;
 		}
 		}
 
 
 	}
 	}
 insert:
 insert:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	/* try to insert the key into the destination tree */
 	/* try to insert the key into the destination tree */
 	ret = btrfs_insert_empty_item(trans, root, path,
 	ret = btrfs_insert_empty_item(trans, root, path,
 				      key, item_size);
 				      key, item_size);
@@ -438,7 +438,7 @@ insert:
 	}
 	}
 no_copy:
 no_copy:
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -544,11 +544,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 		 * we don't have to do anything
 		 * we don't have to do anything
 		 */
 		 */
 		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
 		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			goto out;
 			goto out;
 		}
 		}
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	saved_nbytes = inode_get_bytes(inode);
 	saved_nbytes = inode_get_bytes(inode);
 	/* drop any overlapping extents */
 	/* drop any overlapping extents */
@@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 						key->objectid, offset, &ins);
 						key->objectid, offset, &ins);
 				BUG_ON(ret);
 				BUG_ON(ret);
 			}
 			}
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 
 
 			if (btrfs_file_extent_compression(eb, item)) {
 			if (btrfs_file_extent_compression(eb, item)) {
 				csum_start = ins.objectid;
 				csum_start = ins.objectid;
@@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 				kfree(sums);
 				kfree(sums);
 			}
 			}
 		} else {
 		} else {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 		}
 		}
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		/* inline extents are easy, we just overwrite them */
 		/* inline extents are easy, we just overwrite them */
@@ -675,7 +675,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 		return -ENOMEM;
 
 
 	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
 	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	inode = read_one_inode(root, location.objectid);
 	inode = read_one_inode(root, location.objectid);
 	BUG_ON(!inode);
 	BUG_ON(!inode);
@@ -713,7 +713,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
 			goto out;
 			goto out;
 	} else
 	} else
 		goto out;
 		goto out;
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
 	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
 	if (di && !IS_ERR(di)) {
 	if (di && !IS_ERR(di)) {
@@ -724,7 +724,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
 		goto out;
 		goto out;
 	match = 1;
 	match = 1;
 out:
 out:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	return match;
 	return match;
 }
 }
 
 
@@ -884,7 +884,7 @@ again:
 			if (!backref_in_log(log, key, victim_name,
 			if (!backref_in_log(log, key, victim_name,
 					    victim_name_len)) {
 					    victim_name_len)) {
 				btrfs_inc_nlink(inode);
 				btrfs_inc_nlink(inode);
-				btrfs_release_path(root, path);
+				btrfs_release_path(path);
 
 
 				ret = btrfs_unlink_inode(trans, root, dir,
 				ret = btrfs_unlink_inode(trans, root, dir,
 							 inode, victim_name,
 							 inode, victim_name,
@@ -901,7 +901,7 @@ again:
 		 */
 		 */
 		search_done = 1;
 		search_done = 1;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 insert:
 insert:
 	/* insert our name */
 	/* insert our name */
@@ -922,7 +922,7 @@ out:
 	BUG_ON(ret);
 	BUG_ON(ret);
 
 
 out_nowrite:
 out_nowrite:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	iput(dir);
 	iput(dir);
 	iput(inode);
 	iput(inode);
 	return 0;
 	return 0;
@@ -1000,9 +1000,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 		if (key.offset == 0)
 		if (key.offset == 0)
 			break;
 			break;
 		key.offset--;
 		key.offset--;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	if (nlink != inode->i_nlink) {
 	if (nlink != inode->i_nlink) {
 		inode->i_nlink = nlink;
 		inode->i_nlink = nlink;
 		btrfs_update_inode(trans, root, inode);
 		btrfs_update_inode(trans, root, inode);
@@ -1053,7 +1053,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 		ret = btrfs_del_item(trans, root, path);
 		ret = btrfs_del_item(trans, root, path);
 		BUG_ON(ret);
 		BUG_ON(ret);
 
 
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		inode = read_one_inode(root, key.offset);
 		inode = read_one_inode(root, key.offset);
 		BUG_ON(!inode);
 		BUG_ON(!inode);
 
 
@@ -1069,7 +1069,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 		 */
 		 */
 		key.offset = (u64)-1;
 		key.offset = (u64)-1;
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1097,7 +1097,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
 
 
 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 
 
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	if (ret == 0) {
 	if (ret == 0) {
 		btrfs_inc_nlink(inode);
 		btrfs_inc_nlink(inode);
 		btrfs_update_inode(trans, root, inode);
 		btrfs_update_inode(trans, root, inode);
@@ -1193,7 +1193,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 		exists = 1;
 		exists = 1;
 	else
 	else
 		exists = 0;
 		exists = 0;
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	if (key->type == BTRFS_DIR_ITEM_KEY) {
 	if (key->type == BTRFS_DIR_ITEM_KEY) {
 		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
 		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1206,7 +1206,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	} else {
 	} else {
 		BUG();
 		BUG();
 	}
 	}
-	if (!dst_di || IS_ERR(dst_di)) {
+	if (IS_ERR_OR_NULL(dst_di)) {
 		/* we need a sequence number to insert, so we only
 		/* we need a sequence number to insert, so we only
 		 * do inserts for the BTRFS_DIR_INDEX_KEY types
 		 * do inserts for the BTRFS_DIR_INDEX_KEY types
 		 */
 		 */
@@ -1237,13 +1237,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	if (key->type == BTRFS_DIR_INDEX_KEY)
 	if (key->type == BTRFS_DIR_INDEX_KEY)
 		goto insert;
 		goto insert;
 out:
 out:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	kfree(name);
 	kfree(name);
 	iput(dir);
 	iput(dir);
 	return 0;
 	return 0;
 
 
 insert:
 insert:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
 	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
 			      name, name_len, log_type, &log_key);
 			      name, name_len, log_type, &log_key);
 
 
@@ -1364,7 +1364,7 @@ next:
 	*end_ret = found_end;
 	*end_ret = found_end;
 	ret = 0;
 	ret = 0;
 out:
 out:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -1427,10 +1427,10 @@ again:
 						     dir_key->offset,
 						     dir_key->offset,
 						     name, name_len, 0);
 						     name, name_len, 0);
 		}
 		}
-		if (!log_di || IS_ERR(log_di)) {
+		if (IS_ERR_OR_NULL(log_di)) {
 			btrfs_dir_item_key_to_cpu(eb, di, &location);
 			btrfs_dir_item_key_to_cpu(eb, di, &location);
-			btrfs_release_path(root, path);
-			btrfs_release_path(log, log_path);
+			btrfs_release_path(path);
+			btrfs_release_path(log_path);
 			inode = read_one_inode(root, location.objectid);
 			inode = read_one_inode(root, location.objectid);
 			BUG_ON(!inode);
 			BUG_ON(!inode);
 
 
@@ -1454,7 +1454,7 @@ again:
 			ret = 0;
 			ret = 0;
 			goto out;
 			goto out;
 		}
 		}
-		btrfs_release_path(log, log_path);
+		btrfs_release_path(log_path);
 		kfree(name);
 		kfree(name);
 
 
 		ptr = (unsigned long)(di + 1);
 		ptr = (unsigned long)(di + 1);
@@ -1462,8 +1462,8 @@ again:
 	}
 	}
 	ret = 0;
 	ret = 0;
 out:
 out:
-	btrfs_release_path(root, path);
-	btrfs_release_path(log, log_path);
+	btrfs_release_path(path);
+	btrfs_release_path(log_path);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -1551,7 +1551,7 @@ again:
 				break;
 				break;
 			dir_key.offset = found_key.offset + 1;
 			dir_key.offset = found_key.offset + 1;
 		}
 		}
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		if (range_end == (u64)-1)
 		if (range_end == (u64)-1)
 			break;
 			break;
 		range_start = range_end + 1;
 		range_start = range_end + 1;
@@ -1562,11 +1562,11 @@ next_type:
 	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
 	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
 		key_type = BTRFS_DIR_LOG_INDEX_KEY;
 		key_type = BTRFS_DIR_LOG_INDEX_KEY;
 		dir_key.type = BTRFS_DIR_INDEX_KEY;
 		dir_key.type = BTRFS_DIR_INDEX_KEY;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		goto again;
 		goto again;
 	}
 	}
 out:
 out:
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 	btrfs_free_path(log_path);
 	btrfs_free_path(log_path);
 	iput(dir);
 	iput(dir);
 	return ret;
 	return ret;
@@ -2227,7 +2227,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 		bytes_del += name_len;
 		bytes_del += name_len;
 		BUG_ON(ret);
 		BUG_ON(ret);
 	}
 	}
-	btrfs_release_path(log, path);
+	btrfs_release_path(path);
 	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
 	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
 					 index, name, name_len, -1);
 					 index, name, name_len, -1);
 	if (IS_ERR(di)) {
 	if (IS_ERR(di)) {
@@ -2249,7 +2249,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 		key.objectid = dir_ino;
 		key.objectid = dir_ino;
 		key.offset = 0;
 		key.offset = 0;
 		key.type = BTRFS_INODE_ITEM_KEY;
 		key.type = BTRFS_INODE_ITEM_KEY;
-		btrfs_release_path(log, path);
+		btrfs_release_path(path);
 
 
 		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
 		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
 		if (ret < 0) {
 		if (ret < 0) {
@@ -2271,7 +2271,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 			btrfs_mark_buffer_dirty(path->nodes[0]);
 			btrfs_mark_buffer_dirty(path->nodes[0]);
 		} else
 		} else
 			ret = 0;
 			ret = 0;
-		btrfs_release_path(log, path);
+		btrfs_release_path(path);
 	}
 	}
 fail:
 fail:
 	btrfs_free_path(path);
 	btrfs_free_path(path);
@@ -2346,7 +2346,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
 			      struct btrfs_dir_log_item);
 			      struct btrfs_dir_log_item);
 	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
 	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
-	btrfs_release_path(log, path);
+	btrfs_release_path(path);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2395,10 +2395,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 		min_key.objectid = ino;
 		min_key.objectid = ino;
 		min_key.type = key_type;
 		min_key.type = key_type;
 		min_key.offset = (u64)-1;
 		min_key.offset = (u64)-1;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
 		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
 		if (ret < 0) {
 		if (ret < 0) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			return ret;
 			return ret;
 		}
 		}
 		ret = btrfs_previous_item(root, path, ino, key_type);
 		ret = btrfs_previous_item(root, path, ino, key_type);
@@ -2434,7 +2434,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 			}
 			}
 		}
 		}
 	}
 	}
-	btrfs_release_path(root, path);
+	btrfs_release_path(path);
 
 
 	/* find the first key from this transaction again */
 	/* find the first key from this transaction again */
 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
@@ -2491,8 +2491,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 		}
 		}
 	}
 	}
 done:
 done:
-	btrfs_release_path(root, path);
-	btrfs_release_path(log, dst_path);
+	btrfs_release_path(path);
+	btrfs_release_path(dst_path);
 
 
 	if (err == 0) {
 	if (err == 0) {
 		*last_offset_ret = last_offset;
 		*last_offset_ret = last_offset;
@@ -2588,9 +2588,9 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
 
 
 		ret = btrfs_del_item(trans, log, path);
 		ret = btrfs_del_item(trans, log, path);
 		BUG_ON(ret);
 		BUG_ON(ret);
-		btrfs_release_path(log, path);
+		btrfs_release_path(path);
 	}
 	}
-	btrfs_release_path(log, path);
+	btrfs_release_path(path);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -2696,7 +2696,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 	}
 	}
 
 
 	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
 	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
-	btrfs_release_path(log, dst_path);
+	btrfs_release_path(dst_path);
 	kfree(ins_data);
 	kfree(ins_data);
 
 
 	/*
 	/*
@@ -2852,7 +2852,7 @@ next_slot:
 			}
 			}
 			ins_nr = 0;
 			ins_nr = 0;
 		}
 		}
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 
 		if (min_key.offset < (u64)-1)
 		if (min_key.offset < (u64)-1)
 			min_key.offset++;
 			min_key.offset++;
@@ -2875,8 +2875,8 @@ next_slot:
 	}
 	}
 	WARN_ON(ins_nr);
 	WARN_ON(ins_nr);
 	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
 	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-		btrfs_release_path(root, path);
-		btrfs_release_path(log, dst_path);
+		btrfs_release_path(path);
+		btrfs_release_path(dst_path);
 		ret = log_directory_changes(trans, root, inode, path, dst_path);
 		ret = log_directory_changes(trans, root, inode, path, dst_path);
 		if (ret) {
 		if (ret) {
 			err = ret;
 			err = ret;
@@ -3143,7 +3143,7 @@ again:
 		}
 		}
 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 				      path->slots[0]);
 				      path->slots[0]);
-		btrfs_release_path(log_root_tree, path);
+		btrfs_release_path(path);
 		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
 		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
 			break;
 			break;
 
 
@@ -3178,7 +3178,7 @@ again:
 		if (found_key.offset == 0)
 		if (found_key.offset == 0)
 			break;
 			break;
 	}
 	}
-	btrfs_release_path(log_root_tree, path);
+	btrfs_release_path(path);
 
 
 	/* step one is to pin it all, step two is to replay just inodes */
 	/* step one is to pin it all, step two is to replay just inodes */
 	if (wc.pin) {
 	if (wc.pin) {

+ 0 - 1
fs/btrfs/tree-log.h

@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       struct btrfs_root *root,
 			       const char *name, int name_len,
 			       const char *name, int name_len,
 			       struct inode *inode, u64 dirid);
 			       struct inode *inode, u64 dirid);
-int btrfs_join_running_log_trans(struct btrfs_root *root);
 int btrfs_end_log_trans(struct btrfs_root *root);
 int btrfs_end_log_trans(struct btrfs_root *root);
 int btrfs_pin_log_trans(struct btrfs_root *root);
 int btrfs_pin_log_trans(struct btrfs_root *root);
 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,

+ 11 - 30
fs/btrfs/volumes.c

@@ -44,16 +44,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
 static DEFINE_MUTEX(uuid_mutex);
 static DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
 static LIST_HEAD(fs_uuids);
 
 
-void btrfs_lock_volumes(void)
-{
-	mutex_lock(&uuid_mutex);
-}
-
-void btrfs_unlock_volumes(void)
-{
-	mutex_unlock(&uuid_mutex);
-}
-
 static void lock_chunks(struct btrfs_root *root)
 static void lock_chunks(struct btrfs_root *root)
 {
 {
 	mutex_lock(&root->fs_info->chunk_mutex);
 	mutex_lock(&root->fs_info->chunk_mutex);
@@ -1475,7 +1465,7 @@ next_slot:
 				goto error;
 				goto error;
 			leaf = path->nodes[0];
 			leaf = path->nodes[0];
 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			continue;
 			continue;
 		}
 		}
 
 
@@ -1947,7 +1937,7 @@ again:
 		chunk = btrfs_item_ptr(leaf, path->slots[0],
 		chunk = btrfs_item_ptr(leaf, path->slots[0],
 				       struct btrfs_chunk);
 				       struct btrfs_chunk);
 		chunk_type = btrfs_chunk_type(leaf, chunk);
 		chunk_type = btrfs_chunk_type(leaf, chunk);
-		btrfs_release_path(chunk_root, path);
+		btrfs_release_path(path);
 
 
 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
@@ -2065,7 +2055,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 		if (found_key.offset == 0)
 		if (found_key.offset == 0)
 			break;
 			break;
 
 
-		btrfs_release_path(chunk_root, path);
+		btrfs_release_path(path);
 		ret = btrfs_relocate_chunk(chunk_root,
 		ret = btrfs_relocate_chunk(chunk_root,
 					   chunk_root->root_key.objectid,
 					   chunk_root->root_key.objectid,
 					   found_key.objectid,
 					   found_key.objectid,
@@ -2137,7 +2127,7 @@ again:
 			goto done;
 			goto done;
 		if (ret) {
 		if (ret) {
 			ret = 0;
 			ret = 0;
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			break;
 			break;
 		}
 		}
 
 
@@ -2146,7 +2136,7 @@ again:
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
 
 		if (key.objectid != device->devid) {
 		if (key.objectid != device->devid) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			break;
 			break;
 		}
 		}
 
 
@@ -2154,14 +2144,14 @@ again:
 		length = btrfs_dev_extent_length(l, dev_extent);
 		length = btrfs_dev_extent_length(l, dev_extent);
 
 
 		if (key.offset + length <= new_size) {
 		if (key.offset + length <= new_size) {
-			btrfs_release_path(root, path);
+			btrfs_release_path(path);
 			break;
 			break;
 		}
 		}
 
 
 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 
 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
 					   chunk_offset);
 					   chunk_offset);
@@ -2609,7 +2599,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
 
 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
 
 
-	em = alloc_extent_map(GFP_NOFS);
+	em = alloc_extent_map();
 	if (!em) {
 	if (!em) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		goto error;
 		goto error;
@@ -2849,7 +2839,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
 
 
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
 {
 {
-	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
+	extent_map_tree_init(&tree->map_tree);
 }
 }
 
 
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
@@ -3499,7 +3489,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		free_extent_map(em);
 		free_extent_map(em);
 	}
 	}
 
 
-	em = alloc_extent_map(GFP_NOFS);
+	em = alloc_extent_map();
 	if (!em)
 	if (!em)
 		return -ENOMEM;
 		return -ENOMEM;
 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3688,15 +3678,6 @@ static int read_one_dev(struct btrfs_root *root,
 	return ret;
 	return ret;
 }
 }
 
 
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
-{
-	struct btrfs_dev_item *dev_item;
-
-	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
-						     dev_item);
-	return read_one_dev(root, buf, dev_item);
-}
-
 int btrfs_read_sys_array(struct btrfs_root *root)
 int btrfs_read_sys_array(struct btrfs_root *root)
 {
 {
 	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
 	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
@@ -3813,7 +3794,7 @@ again:
 	}
 	}
 	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
 	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
 		key.objectid = 0;
 		key.objectid = 0;
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 		goto again;
 		goto again;
 	}
 	}
 	ret = 0;
 	ret = 0;
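btrfs_lock_volumes() and btrfs_unlock_volumes() were one-line wrappers around the file-local uuid_mutex; with their prototypes also removed from volumes.h below, serialization presumably stays inside volumes.c and uses the mutex directly. The pattern, reduced to a sketch with a hypothetical helper:

#include <linux/mutex.h>

static DEFINE_MUTEX(uuid_mutex);	/* file-local, as in volumes.c */

static void walk_fs_uuids(void)
{
	/* take the mutex directly instead of going through exported wrappers */
	mutex_lock(&uuid_mutex);
	/* ... walk the fs_uuids list ... */
	mutex_unlock(&uuid_mutex);
}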

+ 0 - 5
fs/btrfs/volumes.h

@@ -196,7 +196,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 		  int mirror_num, int async_submit);
 		  int mirror_num, int async_submit);
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		       fmode_t flags, void *holder);
 		       fmode_t flags, void *holder);
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
@@ -209,8 +208,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
 int btrfs_rm_device(struct btrfs_root *root, char *device_path);
 int btrfs_rm_device(struct btrfs_root *root, char *device_path);
 int btrfs_cleanup_fs_uuids(void);
 int btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page);
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
 		      struct btrfs_device *device, u64 new_size);
 		      struct btrfs_device *device, u64 new_size);
 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
@@ -218,8 +215,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_root *dev_root);
 int btrfs_balance(struct btrfs_root *dev_root);
-void btrfs_unlock_volumes(void);
-void btrfs_lock_volumes(void);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_trans_handle *trans,
 int find_free_dev_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_device *device, u64 num_bytes,
 			 struct btrfs_device *device, u64 num_bytes,

+ 2 - 2
fs/btrfs/xattr.c

@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 
 
 		ret = btrfs_delete_one_dir_name(trans, root, path, di);
 		ret = btrfs_delete_one_dir_name(trans, root, path, di);
 		BUG_ON(ret);
 		BUG_ON(ret);
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 
 		/* if we don't have a value then we are removing the xattr */
 		/* if we don't have a value then we are removing the xattr */
 		if (!value)
 		if (!value)
 			goto out;
 			goto out;
 	} else {
 	} else {
-		btrfs_release_path(root, path);
+		btrfs_release_path(path);
 
 
 		if (flags & XATTR_REPLACE) {
 		if (flags & XATTR_REPLACE) {
 			/* we couldn't find the attr to replace */
 			/* we couldn't find the attr to replace */

Some files were not shown because too many files changed in this diff