@@ -488,29 +488,16 @@ found:
 	spin_unlock(&pag->pag_buf_lock);
 	xfs_perag_put(pag);
 
-	/* Attempt to get the semaphore without sleeping,
-	 * if this does not work then we need to drop the
-	 * spinlock and do a hard attempt on the semaphore.
-	 */
-	if (down_trylock(&bp->b_sema)) {
+	if (xfs_buf_cond_lock(bp)) {
+		/* failed, so wait for the lock if requested. */
 		if (!(flags & XBF_TRYLOCK)) {
-			/* wait for buffer ownership */
 			xfs_buf_lock(bp);
 			XFS_STATS_INC(xb_get_locked_waited);
 		} else {
-			/* We asked for a trylock and failed, no need
-			 * to look at file offset and length here, we
-			 * know that this buffer at least overlaps our
-			 * buffer and is locked, therefore our buffer
-			 * either does not exist, or is this buffer.
-			 */
 			xfs_buf_rele(bp);
 			XFS_STATS_INC(xb_busy_locked);
 			return NULL;
 		}
-	} else {
-		/* trylock worked */
-		XB_SET_OWNER(bp);
 	}
 
 	if (bp->b_flags & XBF_STALE) {
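For readability, here is roughly how the found: path of the buffer lookup reads once this hunk is applied (a sketch assembled from the context and '+' lines above; the function is presumably _xfs_buf_find(), and the surrounding lookup code is elided):

	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (xfs_buf_cond_lock(bp)) {
		/* failed, so wait for the lock if requested. */
		if (!(flags & XBF_TRYLOCK)) {
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	}

The open-coded down_trylock()/XB_SET_OWNER() sequence is gone from the lookup path; taking the semaphore and claiming ownership now happen in one place, inside xfs_buf_cond_lock().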
@@ -876,10 +863,18 @@ xfs_buf_rele(
 	 */
 
 /*
- *	Locks a buffer object, if it is not already locked.
- *	Note that this in no way locks the underlying pages, so it is only
- *	useful for synchronizing concurrent use of buffer objects, not for
- *	synchronizing independent access to the underlying pages.
+ *	Locks a buffer object, if it is not already locked. Note that this in
+ *	no way locks the underlying pages, so it is only useful for
+ *	synchronizing concurrent use of buffer objects, not for synchronizing
+ *	independent access to the underlying pages.
+ *
+ *	If we come across a stale, pinned, locked buffer, we know that we are
+ *	being asked to lock a buffer that has been reallocated. Because it is
+ *	pinned, we know that the log has not been pushed to disk and hence it
+ *	will still be locked. Rather than continuing to have trylock attempts
+ *	fail until someone else pushes the log, push it ourselves before
+ *	returning. This means that the xfsaild will not get stuck trying
+ *	to push on stale inode buffers.
  */
 int
 xfs_buf_cond_lock(
@@ -890,6 +885,8 @@ xfs_buf_cond_lock(
 	locked = down_trylock(&bp->b_sema) == 0;
 	if (locked)
 		XB_SET_OWNER(bp);
+	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+		xfs_log_force(bp->b_target->bt_mount, 0);
 
 	trace_xfs_buf_cond_lock(bp, _RET_IP_);
 	return locked ? 0 : -EBUSY;
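Taken together with the comment hunk above, the resulting xfs_buf_cond_lock() looks roughly like this (a sketch: the parameter list and the declaration of locked are not shown in the hunks, so the xfs_buf_t *bp argument and the int local are assumptions):

int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)	/* assumed parameter, not shown in the hunk */
{
	int			locked;	/* assumed local, not shown in the hunk */

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		/* stale, pinned buffer: force the log so its lock can be released */
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
	return locked ? 0 : -EBUSY;
}

Note the return convention: 0 on success and -EBUSY on failure, which is why the caller in the first hunk treats a non-zero return as a trylock failure.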