Browse code

[PATCH] writeback: fix range handling

When a writeback_control's `start' and `end' fields are used to
indicate a one-byte-range starting at file offset zero, the required
values of .start=0,.end=0 mean that the ->writepages() implementation
has no way of telling that it is being asked to perform a range
request, because we are currently overloading (start == 0 && end == 0)
to mean "this is not a write-a-range request".

To make all this sane, this patch changes the range fields of
writeback_control.

So the caller now always sets a range when calling ->writepages() to
write pages: either range_start/range_end or range_cyclic.

If range_cyclic is true, ->writepages() treats the range as cyclic;
otherwise it just uses range_start and range_end.

This patch does the following:

    - Add LLONG_MAX, LLONG_MIN, ULLONG_MAX to include/linux/kernel.h
      -1 is usually ok for range_end (type is long long). But, if someone did,

		range_end += val;		range_end is "val - 1"
		u64val = range_end >> bits;	u64val is "~(0ULL)"

      or something similar, the result would be wrong. So this adds LLONG_MAX
      to avoid such nasty surprises, and uses LLONG_MAX for range_end.

    - All callers of ->writepages() set range_start/end or range_cyclic.

    - Fix updates of ->writeback_index. The existing behaviour already
      seems a bit strange: if the scan starts at 0 and is terminated by
      the nr_to_write check, the saved last index may reduce the chance
      of ever scanning the end of the file. So this updates
      ->writeback_index only if range_cyclic is true or the whole file
      was scanned.

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Steven French <sfrench@us.ibm.com>
Cc: "Vladimir V. Saveliev" <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
OGAWA Hirofumi 19 years ago
parent
commit
111ebb6e6f
9 changed files with 40 additions and 31 deletions
  1. 11 13
      fs/cifs/file.c
  2. 4 0
      fs/fs-writeback.c
  3. 10 12
      fs/mpage.c
  4. 1 1
      fs/sync.c
  5. 3 0
      include/linux/kernel.h
  6. 3 2
      include/linux/writeback.h
  7. 3 3
      mm/filemap.c
  8. 3 0
      mm/page-writeback.c
  9. 2 0
      mm/vmscan.c

+ 11 - 13
fs/cifs/file.c

@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
 	unsigned int bytes_written;
 	unsigned int bytes_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_sb_info *cifs_sb;
 	int done = 0;
 	int done = 0;
-	pgoff_t end = -1;
+	pgoff_t end;
 	pgoff_t index;
 	pgoff_t index;
-	int is_range = 0;
+ 	int range_whole = 0;
 	struct kvec iov[32];
 	struct kvec iov[32];
 	int len;
 	int len;
 	int n_iov = 0;
 	int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
 	xid = GetXid();
 	xid = GetXid();
 
 
 	pagevec_init(&pvec, 0);
 	pagevec_init(&pvec, 0);
-	if (wbc->sync_mode == WB_SYNC_NONE)
+	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		index = mapping->writeback_index; /* Start from prev offset */
-	else {
-		index = 0;
-		scanned = 1;
-	}
-	if (wbc->start || wbc->end) {
-		index = wbc->start >> PAGE_CACHE_SHIFT;
-		end = wbc->end >> PAGE_CACHE_SHIFT;
-		is_range = 1;
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
 		scanned = 1;
 		scanned = 1;
 	}
 	}
 retry:
 retry:
@@ -1167,7 +1165,7 @@ retry:
 				break;
 				break;
 			}
 			}
 
 
-			if (unlikely(is_range) && (page->index > end)) {
+			if (!wbc->range_cyclic && page->index > end) {
 				done = 1;
 				done = 1;
 				unlock_page(page);
 				unlock_page(page);
 				break;
 				break;
@@ -1271,7 +1269,7 @@ retry:
 		index = 0;
 		index = 0;
 		goto retry;
 		goto retry;
 	}
 	}
-	if (!is_range)
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
 		mapping->writeback_index = index;
 
 
 	FreeXid(xid);
 	FreeXid(xid);

+ 4 - 0
fs/fs-writeback.c

@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 {
 {
 	struct writeback_control wbc = {
 	struct writeback_control wbc = {
 		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
 		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+		.range_start	= 0,
+		.range_end	= LLONG_MAX,
 	};
 	};
 	unsigned long nr_dirty = read_page_state(nr_dirty);
 	unsigned long nr_dirty = read_page_state(nr_dirty);
 	unsigned long nr_unstable = read_page_state(nr_unstable);
 	unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
 	struct writeback_control wbc = {
 	struct writeback_control wbc = {
 		.nr_to_write = LONG_MAX,
 		.nr_to_write = LONG_MAX,
 		.sync_mode = WB_SYNC_ALL,
 		.sync_mode = WB_SYNC_ALL,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
 	};
 	};
 
 
 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
 	if (!mapping_cap_writeback_dirty(inode->i_mapping))

+ 10 - 12
fs/mpage.c

@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
 	struct pagevec pvec;
 	struct pagevec pvec;
 	int nr_pages;
 	int nr_pages;
 	pgoff_t index;
 	pgoff_t index;
-	pgoff_t end = -1;		/* Inclusive */
+	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
 	int scanned = 0;
-	int is_range = 0;
+	int range_whole = 0;
 
 
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
 		wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
 		writepage = mapping->a_ops->writepage;
 		writepage = mapping->a_ops->writepage;
 
 
 	pagevec_init(&pvec, 0);
 	pagevec_init(&pvec, 0);
-	if (wbc->sync_mode == WB_SYNC_NONE) {
+	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		index = mapping->writeback_index; /* Start from prev offset */
+		end = -1;
 	} else {
 	} else {
-		index = 0;			  /* whole-file sweep */
-		scanned = 1;
-	}
-	if (wbc->start || wbc->end) {
-		index = wbc->start >> PAGE_CACHE_SHIFT;
-		end = wbc->end >> PAGE_CACHE_SHIFT;
-		is_range = 1;
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
 		scanned = 1;
 		scanned = 1;
 	}
 	}
 retry:
 retry:
@@ -759,7 +757,7 @@ retry:
 				continue;
 				continue;
 			}
 			}
 
 
-			if (unlikely(is_range) && page->index > end) {
+			if (!wbc->range_cyclic && page->index > end) {
 				done = 1;
 				done = 1;
 				unlock_page(page);
 				unlock_page(page);
 				continue;
 				continue;
@@ -810,7 +808,7 @@ retry:
 		index = 0;
 		index = 0;
 		goto retry;
 		goto retry;
 	}
 	}
-	if (!is_range)
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
 		mapping->writeback_index = index;
 	if (bio)
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 		mpage_bio_submit(WRITE, bio);

+ 1 - 1
fs/sync.c

@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 	}
 	}
 
 
 	if (nbytes == 0)
 	if (nbytes == 0)
-		endbyte = -1;
+		endbyte = LLONG_MAX;
 	else
 	else
 		endbyte--;		/* inclusive */
 		endbyte--;		/* inclusive */
 
 

+ 3 - 0
include/linux/kernel.h

@@ -24,6 +24,9 @@ extern const char linux_banner[];
 #define LONG_MAX	((long)(~0UL>>1))
 #define LONG_MAX	((long)(~0UL>>1))
 #define LONG_MIN	(-LONG_MAX - 1)
 #define LONG_MIN	(-LONG_MAX - 1)
 #define ULONG_MAX	(~0UL)
 #define ULONG_MAX	(~0UL)
+#define LLONG_MAX	((long long)(~0ULL>>1))
+#define LLONG_MIN	(-LLONG_MAX - 1)
+#define ULLONG_MAX	(~0ULL)
 
 
 #define STACK_MAGIC	0xdeadbeef
 #define STACK_MAGIC	0xdeadbeef
 
 

+ 3 - 2
include/linux/writeback.h

@@ -50,14 +50,15 @@ struct writeback_control {
 	 * a hint that the filesystem need only write out the pages inside that
 	 * a hint that the filesystem need only write out the pages inside that
 	 * byterange.  The byte at `end' is included in the writeout request.
 	 * byterange.  The byte at `end' is included in the writeout request.
 	 */
 	 */
-	loff_t start;
-	loff_t end;
+	loff_t range_start;
+	loff_t range_end;
 
 
 	unsigned nonblocking:1;		/* Don't get stuck on request queues */
 	unsigned nonblocking:1;		/* Don't get stuck on request queues */
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned for_writepages:1;	/* This is a writepages() call */
 	unsigned for_writepages:1;	/* This is a writepages() call */
+	unsigned range_cyclic:1;	/* range_start is cyclic */
 };
 };
 
 
 /*
 /*

+ 3 - 3
mm/filemap.c

@@ -190,8 +190,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	struct writeback_control wbc = {
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
 		.sync_mode = sync_mode,
 		.nr_to_write = mapping->nrpages * 2,
 		.nr_to_write = mapping->nrpages * 2,
-		.start = start,
-		.end = end,
+		.range_start = start,
+		.range_end = end,
 	};
 	};
 
 
 	if (!mapping_cap_writeback_dirty(mapping))
 	if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +204,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 	int sync_mode)
 	int sync_mode)
 {
 {
-	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 }
 }
 
 
 int filemap_fdatawrite(struct address_space *mapping)
 int filemap_fdatawrite(struct address_space *mapping)

+ 3 - 0
mm/page-writeback.c

@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 			.sync_mode	= WB_SYNC_NONE,
 			.sync_mode	= WB_SYNC_NONE,
 			.older_than_this = NULL,
 			.older_than_this = NULL,
 			.nr_to_write	= write_chunk,
 			.nr_to_write	= write_chunk,
+			.range_cyclic	= 1,
 		};
 		};
 
 
 		get_dirty_limits(&wbs, &background_thresh,
 		get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
 		.older_than_this = NULL,
 		.older_than_this = NULL,
 		.nr_to_write	= 0,
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
 		.nonblocking	= 1,
+		.range_cyclic	= 1,
 	};
 	};
 
 
 	for ( ; ; ) {
 	for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
 		.nr_to_write	= 0,
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
 		.nonblocking	= 1,
 		.for_kupdate	= 1,
 		.for_kupdate	= 1,
+		.range_cyclic	= 1,
 	};
 	};
 
 
 	sync_supers();
 	sync_supers();

+ 2 - 0
mm/vmscan.c

@@ -339,6 +339,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
 		struct writeback_control wbc = {
 		struct writeback_control wbc = {
 			.sync_mode = WB_SYNC_NONE,
 			.sync_mode = WB_SYNC_NONE,
 			.nr_to_write = SWAP_CLUSTER_MAX,
 			.nr_to_write = SWAP_CLUSTER_MAX,
+			.range_start = 0,
+			.range_end = LLONG_MAX,
 			.nonblocking = 1,
 			.nonblocking = 1,
 			.for_reclaim = 1,
 			.for_reclaim = 1,
 		};
 		};