Переглянути джерело

xfs: kill b_file_offset

Seeing as we pass block numbers around everywhere in the buffer
cache now, it makes no sense to index everything by byte offset.
Replace all the byte offset indexing with block number based
indexing, and replace all uses of the byte offset with direct
conversion from the block index.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Dave Chinner 13 роки тому
батько
коміт
de1cbee462
2 змінених файлів з 8 додано та 14 видалено
  1. 7 10
      fs/xfs/xfs_buf.c
  2. 1 4
      fs/xfs/xfs_buf.h

+ 7 - 10
fs/xfs/xfs_buf.c

@@ -196,7 +196,7 @@ xfs_buf_alloc(
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
-	bp->b_file_offset = blkno << BBSHIFT;
+
 	/*
 	 * Set buffer_length and count_desired to the same value initially.
 	 * I/O routines should use count_desired, which will be the same in
@@ -337,8 +337,8 @@ xfs_buf_allocate_memory(
 	}
 
 use_alloc_page:
-	end = bp->b_file_offset + bp->b_buffer_length;
-	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
+	end = BBTOB(bp->b_bn) + bp->b_buffer_length;
+	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(BBTOB(bp->b_bn));
 	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
 		return error;
@@ -439,19 +439,17 @@ _xfs_buf_find(
 	xfs_buf_flags_t		flags,
 	xfs_buf_t		*new_bp)
 {
-	xfs_off_t		offset;
 	size_t			numbytes;
 	struct xfs_perag	*pag;
 	struct rb_node		**rbp;
 	struct rb_node		*parent;
 	xfs_buf_t		*bp;
 
-	offset = BBTOB(blkno);
 	numbytes = BBTOB(numblks);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
 	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
-	ASSERT(!(offset & (xfs_off_t)btp->bt_smask));
+	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
 
 	/* get tree root */
 	pag = xfs_perag_get(btp->bt_mount,
@@ -466,13 +464,13 @@ _xfs_buf_find(
 		parent = *rbp;
 		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
 
-		if (offset < bp->b_file_offset)
+		if (blkno < bp->b_bn)
 			rbp = &(*rbp)->rb_left;
-		else if (offset > bp->b_file_offset)
+		else if (blkno > bp->b_bn)
 			rbp = &(*rbp)->rb_right;
 		else {
 			/*
-			 * found a block offset match. If the range doesn't
+			 * found a block number match. If the range doesn't
 			 * match, the only way this is allowed is if the buffer
 			 * in the cache is stale and the transaction that made
 			 * it stale has not yet committed. i.e. we are
@@ -718,7 +716,6 @@ xfs_buf_set_empty(
 	bp->b_pages = NULL;
 	bp->b_page_count = 0;
 	bp->b_addr = NULL;
-	bp->b_file_offset = 0;
 	bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
 	bp->b_bn = XFS_BUF_DADDR_NULL;
 	bp->b_flags &= ~XBF_MAPPED;

+ 1 - 4
fs/xfs/xfs_buf.h

@@ -116,7 +116,7 @@ typedef struct xfs_buf {
 	 * fast-path on locking.
 	 */
 	struct rb_node		b_rbnode;	/* rbtree node */
-	xfs_off_t		b_file_offset;	/* offset in file */
+	xfs_daddr_t		b_bn;		/* block number for I/O */
 	size_t			b_buffer_length;/* size of buffer in bytes */
 	atomic_t		b_hold;		/* reference count */
 	atomic_t		b_lru_ref;	/* lru reclaim ref count */
@@ -128,7 +128,6 @@ typedef struct xfs_buf {
 	struct list_head	b_list;
 	struct xfs_perag	*b_pag;		/* contains rbtree root */
 	xfs_buftarg_t		*b_target;	/* buffer target (device) */
-	xfs_daddr_t		b_bn;		/* block number for I/O */
 	size_t			b_count_desired;/* desired transfer size */
 	void			*b_addr;	/* virtual address of buffer */
 	struct work_struct	b_iodone_work;
@@ -245,8 +244,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
 
 #define XFS_BUF_ADDR(bp)		((bp)->b_bn)
 #define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno))
-#define XFS_BUF_OFFSET(bp)		((bp)->b_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off)	((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp)		((bp)->b_count_desired)
 #define XFS_BUF_SET_COUNT(bp, cnt)	((bp)->b_count_desired = (cnt))
 #define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)