@@ -879,100 +879,6 @@ xfs_qm_dqdetach(
 	}
 }
 
-int
-xfs_qm_sync(
-	struct xfs_mount	*mp,
-	int			flags)
-{
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	int			recl, restarts;
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
-		return 0;
-
-	restarts = 0;
-
- again:
-	mutex_lock(&q->qi_dqlist_lock);
-	/*
-	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
-	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
-	 * when we have the mplist lock, we know that dquots will be consistent
-	 * as long as we have it locked.
-	 */
-	if (!XFS_IS_QUOTA_ON(mp)) {
-		mutex_unlock(&q->qi_dqlist_lock);
-		return 0;
-	}
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		/*
-		 * If this is vfs_sync calling, then skip the dquots that
-		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
-		 * This is very similar to what xfs_sync does with inodes.
-		 */
-		if (flags & SYNC_TRYLOCK) {
-			if (!XFS_DQ_IS_DIRTY(dqp))
-				continue;
-			if (!xfs_qm_dqlock_nowait(dqp))
-				continue;
-		} else {
-			xfs_dqlock(dqp);
-		}
-
-		/*
-		 * Now, find out for sure if this dquot is dirty or not.
-		 */
-		if (! XFS_DQ_IS_DIRTY(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
-
-		/* XXX a sentinel would be better */
-		recl = q->qi_dqreclaims;
-		if (!xfs_dqflock_nowait(dqp)) {
-			if (flags & SYNC_TRYLOCK) {
-				xfs_dqunlock(dqp);
-				continue;
-			}
-			/*
-			 * If we can't grab the flush lock then if the caller
-			 * really wanted us to give this our best shot, so
-			 * see if we can give a push to the buffer before we wait
-			 * on the flush lock. At this point, we know that
-			 * even though the dquot is being flushed,
-			 * it has (new) dirty data.
-			 */
-			xfs_qm_dqflock_pushbuf_wait(dqp);
-		}
-		/*
-		 * Let go of the mplist lock. We don't want to hold it
-		 * across a disk write
-		 */
-		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, flags);
-		xfs_dqunlock(dqp);
-		if (error && XFS_FORCED_SHUTDOWN(mp))
-			return 0;	/* Need to prevent umount failure */
-		else if (error)
-			return error;
-
-		mutex_lock(&q->qi_dqlist_lock);
-		if (recl != q->qi_dqreclaims) {
-			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
-				break;
-
-			mutex_unlock(&q->qi_dqlist_lock);
-			goto again;
-		}
-	}
-
-	mutex_unlock(&q->qi_dqlist_lock);
-	return 0;
-}
-
 /*
  * The hash chains and the mplist use the same xfs_dqhash structure as
  * their list head, but we can take the mplist qh_lock and one of the