Browse Source

[XFS] Fix racy access to pb_flags. pagebuf_rele() modified pb_flags after
the pagebuf had been unlocked if the buffer was delwri. At high load, this
could result in a race when the superblock was being synced that would
result in the flags being incorrect and the iodone functions being executed
incorrectly. This then leads to iclog callback failures or AIL list
corruptions resulting in filesystem shutdowns.

SGI-PV: 923981
SGI-Modid: xfs-linux:xfs-kern:23616a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

David Chinner 20 years ago
parent
commit
2f92658751
2 changed files with 51 additions and 18 deletions
  1. 50 16
      fs/xfs/linux-2.6/xfs_buf.c
  2. 1 2
      fs/xfs/linux-2.6/xfs_buf.h

+ 50 - 16
fs/xfs/linux-2.6/xfs_buf.c

@@ -590,8 +590,10 @@ found:
 		PB_SET_OWNER(pb);
 		PB_SET_OWNER(pb);
 	}
 	}
 
 
-	if (pb->pb_flags & PBF_STALE)
+	if (pb->pb_flags & PBF_STALE) {
+		ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
 		pb->pb_flags &= PBF_MAPPED;
 		pb->pb_flags &= PBF_MAPPED;
+	}
 	PB_TRACE(pb, "got_lock", 0);
 	PB_TRACE(pb, "got_lock", 0);
 	XFS_STATS_INC(pb_get_locked);
 	XFS_STATS_INC(pb_get_locked);
 	return (pb);
 	return (pb);
@@ -872,6 +874,17 @@ pagebuf_rele(
 
 
 	PB_TRACE(pb, "rele", pb->pb_relse);
 	PB_TRACE(pb, "rele", pb->pb_relse);
 
 
+	/*
+	 * pagebuf_lookup buffers are not hashed, not delayed write,
+	 * and don't have their own release routines.  Special case.
+	 */
+	if (unlikely(!hash)) {
+		ASSERT(!pb->pb_relse);
+		if (atomic_dec_and_test(&pb->pb_hold))
+			xfs_buf_free(pb);
+		return;
+	}
+
 	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
 	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
 		int		do_free = 1;
 		int		do_free = 1;
 
 
@@ -883,22 +896,23 @@ pagebuf_rele(
 			do_free = 0;
 			do_free = 0;
 		}
 		}
 
 
-		if (pb->pb_flags & PBF_DELWRI) {
-			pb->pb_flags |= PBF_ASYNC;
-			atomic_inc(&pb->pb_hold);
-			pagebuf_delwri_queue(pb, 0);
-			do_free = 0;
-		} else if (pb->pb_flags & PBF_FS_MANAGED) {
+		if (pb->pb_flags & PBF_FS_MANAGED) {
 			do_free = 0;
 			do_free = 0;
 		}
 		}
 
 
 		if (do_free) {
 		if (do_free) {
+			ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
 			list_del_init(&pb->pb_hash_list);
 			list_del_init(&pb->pb_hash_list);
 			spin_unlock(&hash->bh_lock);
 			spin_unlock(&hash->bh_lock);
 			pagebuf_free(pb);
 			pagebuf_free(pb);
 		} else {
 		} else {
 			spin_unlock(&hash->bh_lock);
 			spin_unlock(&hash->bh_lock);
 		}
 		}
+	} else {
+		/*
+		 * Catch reference count leaks
+		 */
+		ASSERT(atomic_read(&pb->pb_hold) >= 0);
 	}
 	}
 }
 }
 
 
@@ -976,13 +990,24 @@ pagebuf_lock(
  *	pagebuf_unlock
  *	pagebuf_unlock
  *
  *
  *	pagebuf_unlock releases the lock on the buffer object created by
  *	pagebuf_unlock releases the lock on the buffer object created by
- *	pagebuf_lock or pagebuf_cond_lock (not any
- *	pinning of underlying pages created by pagebuf_pin).
+ *	pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
+ *	created by pagebuf_pin).
+ *
+ *	If the buffer is marked delwri but is not queued, do so before we
+ *	unlock the buffer as we need to set flags correctly. We also need to
+ *	take a reference for the delwri queue because the unlocker is going to
+ *	drop their's and they don't know we just queued it.
  */
  */
 void
 void
 pagebuf_unlock(				/* unlock buffer		*/
 pagebuf_unlock(				/* unlock buffer		*/
 	xfs_buf_t		*pb)	/* buffer to unlock		*/
 	xfs_buf_t		*pb)	/* buffer to unlock		*/
 {
 {
+	if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
+		atomic_inc(&pb->pb_hold);
+		pb->pb_flags |= PBF_ASYNC;
+		pagebuf_delwri_queue(pb, 0);
+	}
+
 	PB_CLEAR_OWNER(pb);
 	PB_CLEAR_OWNER(pb);
 	up(&pb->pb_sema);
 	up(&pb->pb_sema);
 	PB_TRACE(pb, "unlock", 0);
 	PB_TRACE(pb, "unlock", 0);
@@ -1486,6 +1511,11 @@ again:
 			ASSERT(btp == bp->pb_target);
 			ASSERT(btp == bp->pb_target);
 			if (!(bp->pb_flags & PBF_FS_MANAGED)) {
 			if (!(bp->pb_flags & PBF_FS_MANAGED)) {
 				spin_unlock(&hash->bh_lock);
 				spin_unlock(&hash->bh_lock);
+				/*
+				 * Catch superblock reference count leaks
+				 * immediately
+				 */
+				BUG_ON(bp->pb_bn == 0);
 				delay(100);
 				delay(100);
 				goto again;
 				goto again;
 			}
 			}
@@ -1661,17 +1691,20 @@ pagebuf_delwri_queue(
 	int			unlock)
 	int			unlock)
 {
 {
 	PB_TRACE(pb, "delwri_q", (long)unlock);
 	PB_TRACE(pb, "delwri_q", (long)unlock);
-	ASSERT(pb->pb_flags & PBF_DELWRI);
+	ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
+					(PBF_DELWRI|PBF_ASYNC));
 
 
 	spin_lock(&pbd_delwrite_lock);
 	spin_lock(&pbd_delwrite_lock);
 	/* If already in the queue, dequeue and place at tail */
 	/* If already in the queue, dequeue and place at tail */
 	if (!list_empty(&pb->pb_list)) {
 	if (!list_empty(&pb->pb_list)) {
+		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
 		if (unlock) {
 		if (unlock) {
 			atomic_dec(&pb->pb_hold);
 			atomic_dec(&pb->pb_hold);
 		}
 		}
 		list_del(&pb->pb_list);
 		list_del(&pb->pb_list);
 	}
 	}
 
 
+	pb->pb_flags |= _PBF_DELWRI_Q;
 	list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
 	list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
 	pb->pb_queuetime = jiffies;
 	pb->pb_queuetime = jiffies;
 	spin_unlock(&pbd_delwrite_lock);
 	spin_unlock(&pbd_delwrite_lock);
@@ -1688,10 +1721,11 @@ pagebuf_delwri_dequeue(
 
 
 	spin_lock(&pbd_delwrite_lock);
 	spin_lock(&pbd_delwrite_lock);
 	if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
 	if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
+		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
 		list_del_init(&pb->pb_list);
 		list_del_init(&pb->pb_list);
 		dequeued = 1;
 		dequeued = 1;
 	}
 	}
-	pb->pb_flags &= ~PBF_DELWRI;
+	pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
 	spin_unlock(&pbd_delwrite_lock);
 	spin_unlock(&pbd_delwrite_lock);
 
 
 	if (dequeued)
 	if (dequeued)
@@ -1770,7 +1804,7 @@ xfsbufd(
 					break;
 					break;
 				}
 				}
 
 
-				pb->pb_flags &= ~PBF_DELWRI;
+				pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
 				pb->pb_flags |= PBF_WRITE;
 				pb->pb_flags |= PBF_WRITE;
 				list_move(&pb->pb_list, &tmp);
 				list_move(&pb->pb_list, &tmp);
 			}
 			}
@@ -1820,15 +1854,13 @@ xfs_flush_buftarg(
 		if (pb->pb_target != target)
 		if (pb->pb_target != target)
 			continue;
 			continue;
 
 
-		ASSERT(pb->pb_flags & PBF_DELWRI);
+		ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
 		PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
 		PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
 		if (pagebuf_ispin(pb)) {
 		if (pagebuf_ispin(pb)) {
 			pincount++;
 			pincount++;
 			continue;
 			continue;
 		}
 		}
 
 
-		pb->pb_flags &= ~PBF_DELWRI;
-		pb->pb_flags |= PBF_WRITE;
 		list_move(&pb->pb_list, &tmp);
 		list_move(&pb->pb_list, &tmp);
 	}
 	}
 	spin_unlock(&pbd_delwrite_lock);
 	spin_unlock(&pbd_delwrite_lock);
@@ -1837,12 +1869,14 @@ xfs_flush_buftarg(
 	 * Dropped the delayed write list lock, now walk the temporary list
 	 * Dropped the delayed write list lock, now walk the temporary list
 	 */
 	 */
 	list_for_each_entry_safe(pb, n, &tmp, pb_list) {
 	list_for_each_entry_safe(pb, n, &tmp, pb_list) {
+		pagebuf_lock(pb);
+		pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
+		pb->pb_flags |= PBF_WRITE;
 		if (wait)
 		if (wait)
 			pb->pb_flags &= ~PBF_ASYNC;
 			pb->pb_flags &= ~PBF_ASYNC;
 		else
 		else
 			list_del_init(&pb->pb_list);
 			list_del_init(&pb->pb_list);
 
 
-		pagebuf_lock(pb);
 		pagebuf_iostrategy(pb);
 		pagebuf_iostrategy(pb);
 	}
 	}
 
 

+ 1 - 2
fs/xfs/linux-2.6/xfs_buf.h

@@ -89,6 +89,7 @@ typedef enum page_buf_flags_e {		/* pb_flags values */
 	_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
 	_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
 	_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()		   */
 	_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()		   */
 	_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
 	_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
+	_PBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue		   */
 } page_buf_flags_t;
 } page_buf_flags_t;
 
 
 #define PBF_UPDATE (PBF_READ | PBF_WRITE)
 #define PBF_UPDATE (PBF_READ | PBF_WRITE)
@@ -337,8 +338,6 @@ extern void pagebuf_trace(
 
 
 
 
 
 
-
-
 /* These are just for xfs_syncsub... it sets an internal variable
 /* These are just for xfs_syncsub... it sets an internal variable
  * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
  * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
  */
  */