
Btrfs: Count async bios separately from async checksum work items

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason, 16 years ago
parent
commit
0986fe9eac
3 changed files with 26 additions and 6 deletions
  1. fs/btrfs/ctree.h  +1 -0
  2. fs/btrfs/disk-io.c  +22 -3
  3. fs/btrfs/volumes.c  +3 -3

+ 1 - 0
fs/btrfs/ctree.h

@@ -544,6 +544,7 @@ struct btrfs_fs_info {
 	struct list_head hashers;
 	struct list_head dead_roots;
 	atomic_t nr_async_submits;
+	atomic_t nr_async_bios;
 
 	/*
 	 * this is used by the balancing code to wait for all the pending

+ 22 - 3
fs/btrfs/disk-io.c

@@ -429,6 +429,21 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 	return 0;
 }
 
+static int congested_async(struct btrfs_fs_info *info, int iodone)
+{
+	int limit = 256 * info->fs_devices->open_devices;
+
+	if (iodone)
+		limit = (limit * 3) / 2;
+	if (atomic_read(&info->nr_async_submits) > limit)
+		return 1;
+
+	limit = 8192 * info->fs_devices->open_devices;
+	if (iodone)
+		limit = (limit * 3) / 2;
+	return atomic_read(&info->nr_async_bios) > limit;
+}
+
 static void run_one_async_submit(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info;
@@ -437,6 +452,11 @@ static void run_one_async_submit(struct btrfs_work *work)
 	async = container_of(work, struct  async_submit_bio, work);
 	fs_info = BTRFS_I(async->inode)->root->fs_info;
 	atomic_dec(&fs_info->nr_async_submits);
+
+	if ((async->bio->bi_rw & (1 << BIO_RW)) &&
+	    !congested_async(fs_info, 1)) {
+		clear_bdi_congested(&fs_info->bdi, WRITE);
+	}
 	async->submit_bio_hook(async->inode, async->rw, async->bio,
 			       async->mirror_num);
 	kfree(async);
@@ -938,15 +958,13 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
 	int ret = 0;
-	int limit = 256 * info->fs_devices->open_devices;
 	struct list_head *cur;
 	struct btrfs_device *device;
 	struct backing_dev_info *bdi;
 
 	if ((bdi_bits & (1 << BDI_write_congested)) &&
-	    atomic_read(&info->nr_async_submits) > limit) {
+	    congested_async(info, 0))
 		return 1;
-	}
 
 	list_for_each(cur, &info->fs_devices->devices) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
@@ -1250,6 +1268,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
 	atomic_set(&fs_info->nr_async_submits, 0);
+	atomic_set(&fs_info->nr_async_bios, 0);
 	atomic_set(&fs_info->throttles, 0);
 	atomic_set(&fs_info->throttle_gen, 0);
 	fs_info->sb = sb;
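
Note: congested_async() above combines two per-counter thresholds with a 3/2 factor on the completion path. Below is a minimal userspace sketch of just that arithmetic, not kernel code: fs_stub and congested_async_stub are illustrative stand-ins, and only the constants 256 and 8192 and the (limit * 3) / 2 scaling come from the patch itself.

#include <stdio.h>

/* Illustrative stand-in for the fields congested_async() reads;
 * not the kernel's btrfs_fs_info. */
struct fs_stub {
	int open_devices;    /* info->fs_devices->open_devices */
	long async_submits;  /* atomic_read(&info->nr_async_submits) */
	long async_bios;     /* atomic_read(&info->nr_async_bios) */
};

static int congested_async_stub(const struct fs_stub *info, int iodone)
{
	long limit = 256L * info->open_devices;

	/* The completion (iodone) path checks against limits 3/2 higher,
	 * so clear_bdi_congested() can fire while the backlog is still
	 * above the query-path threshold instead of keeping writers
	 * blocked until the queues fully drain. */
	if (iodone)
		limit = (limit * 3) / 2;
	if (info->async_submits > limit)
		return 1;

	limit = 8192L * info->open_devices;
	if (iodone)
		limit = (limit * 3) / 2;
	return info->async_bios > limit;
}

int main(void)
{
	struct fs_stub info = { .open_devices = 4, .async_submits = 1200,
				.async_bios = 0 };

	/* 1200 > 256 * 4 = 1024: the query path reports congestion... */
	printf("congested (query path):  %d\n", congested_async_stub(&info, 0));
	/* ...but 1200 <= 1536, so the iodone path no longer does. */
	printf("congested (iodone path): %d\n", congested_async_stub(&info, 1));
	return 0;
}

With 4 open devices, the query path reports write congestion above 1024 queued checksum work items (or 32768 queued bios), while the completion path keeps it set only above 1536 (or 49152), so congestion is cleared without waiting for the queues to empty.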

+ 3 - 3
fs/btrfs/volumes.c

@@ -179,7 +179,7 @@ loop:
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
-		atomic_dec(&device->dev_root->fs_info->nr_async_submits);
+		atomic_dec(&device->dev_root->fs_info->nr_async_bios);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 		bio_get(cur);
@@ -2145,12 +2145,12 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 	}
 
 	/*
-	 * nr_async_sumbits allows us to reliably return congestion to the
+	 * nr_async_bios allows us to reliably return congestion to the
 	 * higher layers.  Otherwise, the async bio makes it appear we have
 	 * made progress against dirty pages when we've really just put it
 	 * on a queue for later
 	 */
-	atomic_inc(&root->fs_info->nr_async_submits);
+	atomic_inc(&root->fs_info->nr_async_bios);
 	WARN_ON(bio->bi_next);
 	bio->bi_next = NULL;
 	bio->bi_rw |= rw;
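
The volumes.c side pairs the new counter with the per-device pending queue: schedule_bio() increments nr_async_bios when a bio is merely queued, and the device worker loop decrements it only when the bio is actually handed to the device, which is what lets congested_async() treat the count as real outstanding work. A rough single-threaded sketch of that pairing, with hypothetical queue_bio()/run_pending() stand-ins for schedule_bio() and the kernel's per-device worker:

#include <stdio.h>

#define MAX_PENDING 16

/* Hypothetical stand-in for a device's pending bio list. */
struct dev_stub {
	int pending[MAX_PENDING];
	int head, tail;
};

static long nr_async_bios; /* mirrors fs_info->nr_async_bios */

/* Stand-in for schedule_bio(): queueing counts as outstanding work
 * immediately, so congestion checks see it before any I/O happens. */
static void queue_bio(struct dev_stub *dev, int bio)
{
	dev->pending[dev->tail++ % MAX_PENDING] = bio;
	nr_async_bios++; /* atomic_inc(&root->fs_info->nr_async_bios) */
}

/* Stand-in for the device worker loop: the counter drops only when a
 * queued bio is actually submitted, never at queue time. */
static void run_pending(struct dev_stub *dev)
{
	while (dev->head != dev->tail) {
		int bio = dev->pending[dev->head++ % MAX_PENDING];
		nr_async_bios--; /* atomic_dec(...->nr_async_bios) */
		printf("submitted bio %d, %ld still pending\n",
		       bio, nr_async_bios);
	}
}

int main(void)
{
	struct dev_stub dev = { {0}, 0, 0 };

	queue_bio(&dev, 1);
	queue_bio(&dev, 2);
	printf("pending after queueing: %ld\n", nr_async_bios); /* 2 */
	run_pending(&dev); /* drains both, back to 0 */
	return 0;
}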