|
@@ -67,9 +67,6 @@ static cred_t xfs_zerocr;
|
|
|
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
|
|
|
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
|
|
|
|
|
|
-STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
|
|
|
-STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
|
|
|
-
|
|
|
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
|
|
|
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
|
|
|
STATIC int xfs_qm_shake(int, gfp_t);
|
|
@@ -84,21 +81,25 @@ extern struct mutex qcheck_lock;
|
|
|
#endif
|
|
|
|
|
|
#ifdef QUOTADEBUG
|
|
|
-#define XQM_LIST_PRINT(l, NXT, title) \
|
|
|
-{ \
|
|
|
- xfs_dquot_t *dqp; int i = 0; \
|
|
|
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
|
|
|
- for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
|
|
|
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \
|
|
|
- "bcnt = %d, icnt = %d, refs = %d", \
|
|
|
- ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
|
|
|
- DQFLAGTO_TYPESTR(dqp), \
|
|
|
- (int) be64_to_cpu(dqp->q_core.d_bcount), \
|
|
|
- (int) be64_to_cpu(dqp->q_core.d_icount), \
|
|
|
- (int) dqp->q_nrefs); } \
|
|
|
+static void
|
|
|
+xfs_qm_dquot_list_print(
|
|
|
+ struct xfs_mount *mp)
|
|
|
+{
|
|
|
+ xfs_dquot_t *dqp;
|
|
|
+ int i = 0;
|
|
|
+
|
|
|
+	list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
|
|
|
+ cmn_err(CE_DEBUG, " %d. \"%d (%s)\" "
|
|
|
+ "bcnt = %lld, icnt = %lld, refs = %d",
|
|
|
+ i++, be32_to_cpu(dqp->q_core.d_id),
|
|
|
+ DQFLAGTO_TYPESTR(dqp),
|
|
|
+ (long long)be64_to_cpu(dqp->q_core.d_bcount),
|
|
|
+ (long long)be64_to_cpu(dqp->q_core.d_icount),
|
|
|
+ dqp->q_nrefs);
|
|
|
+ }
|
|
|
}
|
|
|
#else
|
|
|
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
|
|
|
+static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
|
|
|
#endif
|
|
|
|
|
|
/*
|
|
@@ -144,7 +145,9 @@ xfs_Gqm_init(void)
|
|
|
/*
|
|
|
* Freelist of all dquots of all file systems
|
|
|
*/
|
|
|
- xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
|
|
|
+ INIT_LIST_HEAD(&xqm->qm_dqfrlist);
|
|
|
+ xqm->qm_dqfrlist_cnt = 0;
|
|
|
+ mutex_init(&xqm->qm_dqfrlist_lock);
|
|
|
|
|
|
/*
|
|
|
* dquot zone. we register our own low-memory callback.
|
|
@@ -189,6 +192,7 @@ STATIC void
|
|
|
xfs_qm_destroy(
|
|
|
struct xfs_qm *xqm)
|
|
|
{
|
|
|
+ struct xfs_dquot *dqp, *n;
|
|
|
int hsize, i;
|
|
|
|
|
|
ASSERT(xqm != NULL);
|
|
@@ -204,7 +208,21 @@ xfs_qm_destroy(
|
|
|
xqm->qm_usr_dqhtable = NULL;
|
|
|
xqm->qm_grp_dqhtable = NULL;
|
|
|
xqm->qm_dqhashmask = 0;
|
|
|
- xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
|
|
|
+
|
|
|
+ /* frlist cleanup */
|
|
|
+ mutex_lock(&xqm->qm_dqfrlist_lock);
|
|
|
+ list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
|
|
|
+ xfs_dqlock(dqp);
|
|
|
+#ifdef QUOTADEBUG
|
|
|
+ cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
|
|
|
+#endif
|
|
|
+ list_del_init(&dqp->q_freelist);
|
|
|
+ xfs_Gqm->qm_dqfrlist_cnt--;
|
|
|
+ xfs_dqunlock(dqp);
|
|
|
+ xfs_qm_dqdestroy(dqp);
|
|
|
+ }
|
|
|
+ mutex_unlock(&xqm->qm_dqfrlist_lock);
|
|
|
+ mutex_destroy(&xqm->qm_dqfrlist_lock);
|
|
|
#ifdef DEBUG
|
|
|
mutex_destroy(&qcheck_lock);
|
|
|
#endif
|
|
@@ -256,7 +274,7 @@ STATIC void
|
|
|
xfs_qm_rele_quotafs_ref(
|
|
|
struct xfs_mount *mp)
|
|
|
{
|
|
|
- xfs_dquot_t *dqp, *nextdqp;
|
|
|
+ xfs_dquot_t *dqp, *n;
|
|
|
|
|
|
ASSERT(xfs_Gqm);
|
|
|
ASSERT(xfs_Gqm->qm_nrefs > 0);
|
|
@@ -264,26 +282,24 @@ xfs_qm_rele_quotafs_ref(
|
|
|
/*
|
|
|
* Go thru the freelist and destroy all inactive dquots.
|
|
|
*/
|
|
|
- xfs_qm_freelist_lock(xfs_Gqm);
|
|
|
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
|
|
|
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
|
|
|
- dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
|
|
|
+ list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
|
|
|
xfs_dqlock(dqp);
|
|
|
- nextdqp = dqp->dq_flnext;
|
|
|
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
|
|
|
ASSERT(dqp->q_mount == NULL);
|
|
|
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
|
|
|
- ASSERT(dqp->HL_PREVP == NULL);
|
|
|
- ASSERT(dqp->MPL_PREVP == NULL);
|
|
|
- XQM_FREELIST_REMOVE(dqp);
|
|
|
+ ASSERT(list_empty(&dqp->q_hashlist));
|
|
|
+ ASSERT(list_empty(&dqp->q_mplist));
|
|
|
+ list_del_init(&dqp->q_freelist);
|
|
|
+ xfs_Gqm->qm_dqfrlist_cnt--;
|
|
|
xfs_dqunlock(dqp);
|
|
|
xfs_qm_dqdestroy(dqp);
|
|
|
} else {
|
|
|
xfs_dqunlock(dqp);
|
|
|
}
|
|
|
- dqp = nextdqp;
|
|
|
}
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
|
|
|
/*
|
|
|
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
|
|
@@ -305,7 +321,7 @@ xfs_qm_unmount(
|
|
|
struct xfs_mount *mp)
|
|
|
{
|
|
|
if (mp->m_quotainfo) {
|
|
|
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
|
|
|
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
|
|
|
xfs_qm_destroy_quotainfo(mp);
|
|
|
}
|
|
|
}
|
|
@@ -449,20 +465,21 @@ xfs_qm_unmount_quotas(
|
|
|
*/
|
|
|
STATIC int
|
|
|
xfs_qm_dqflush_all(
|
|
|
- xfs_mount_t *mp,
|
|
|
- int sync_mode)
|
|
|
+ struct xfs_mount *mp,
|
|
|
+ int sync_mode)
|
|
|
{
|
|
|
- int recl;
|
|
|
- xfs_dquot_t *dqp;
|
|
|
- int niters;
|
|
|
- int error;
|
|
|
+ struct xfs_quotainfo *q = mp->m_quotainfo;
|
|
|
+ int recl;
|
|
|
+ struct xfs_dquot *dqp;
|
|
|
+ int niters;
|
|
|
+ int error;
|
|
|
|
|
|
- if (mp->m_quotainfo == NULL)
|
|
|
+ if (!q)
|
|
|
return 0;
|
|
|
niters = 0;
|
|
|
again:
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
- FOREACH_DQUOT_IN_MP(dqp, mp) {
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
|
|
|
xfs_dqlock(dqp);
|
|
|
if (! XFS_DQ_IS_DIRTY(dqp)) {
|
|
|
xfs_dqunlock(dqp);
|
|
@@ -470,7 +487,7 @@ again:
|
|
|
}
|
|
|
|
|
|
/* XXX a sentinel would be better */
|
|
|
- recl = XFS_QI_MPLRECLAIMS(mp);
|
|
|
+ recl = q->qi_dqreclaims;
|
|
|
if (!xfs_dqflock_nowait(dqp)) {
|
|
|
/*
|
|
|
* If we can't grab the flush lock then check
|
|
@@ -485,21 +502,21 @@ again:
|
|
|
* Let go of the mplist lock. We don't want to hold it
|
|
|
* across a disk write.
|
|
|
*/
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
error = xfs_qm_dqflush(dqp, sync_mode);
|
|
|
xfs_dqunlock(dqp);
|
|
|
if (error)
|
|
|
return error;
|
|
|
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
+ if (recl != q->qi_dqreclaims) {
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
/* XXX restart limit */
|
|
|
goto again;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
/* return ! busy */
|
|
|
return 0;
|
|
|
}
|
|
@@ -509,15 +526,15 @@ again:
|
|
|
*/
|
|
|
STATIC void
|
|
|
xfs_qm_detach_gdquots(
|
|
|
- xfs_mount_t *mp)
|
|
|
+ struct xfs_mount *mp)
|
|
|
{
|
|
|
- xfs_dquot_t *dqp, *gdqp;
|
|
|
- int nrecl;
|
|
|
+ struct xfs_quotainfo *q = mp->m_quotainfo;
|
|
|
+ struct xfs_dquot *dqp, *gdqp;
|
|
|
+ int nrecl;
|
|
|
|
|
|
again:
|
|
|
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
|
|
|
- dqp = XFS_QI_MPLNEXT(mp);
|
|
|
- while (dqp) {
|
|
|
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
|
|
|
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
|
|
|
xfs_dqlock(dqp);
|
|
|
if ((gdqp = dqp->q_gdquot)) {
|
|
|
xfs_dqlock(gdqp);
|
|
@@ -530,15 +547,14 @@ xfs_qm_detach_gdquots(
|
|
|
* Can't hold the mplist lock across a dqput.
|
|
|
* XXXmust convert to marker based iterations here.
|
|
|
*/
|
|
|
- nrecl = XFS_QI_MPLRECLAIMS(mp);
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ nrecl = q->qi_dqreclaims;
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
xfs_qm_dqput(gdqp);
|
|
|
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
- if (nrecl != XFS_QI_MPLRECLAIMS(mp))
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
+ if (nrecl != q->qi_dqreclaims)
|
|
|
goto again;
|
|
|
}
|
|
|
- dqp = dqp->MPL_NEXT;
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -550,23 +566,23 @@ xfs_qm_detach_gdquots(
|
|
|
*/
|
|
|
STATIC int
|
|
|
xfs_qm_dqpurge_int(
|
|
|
- xfs_mount_t *mp,
|
|
|
- uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
|
|
|
+ struct xfs_mount *mp,
|
|
|
+ uint flags)
|
|
|
{
|
|
|
- xfs_dquot_t *dqp;
|
|
|
- uint dqtype;
|
|
|
- int nrecl;
|
|
|
- xfs_dquot_t *nextdqp;
|
|
|
- int nmisses;
|
|
|
+ struct xfs_quotainfo *q = mp->m_quotainfo;
|
|
|
+ struct xfs_dquot *dqp, *n;
|
|
|
+ uint dqtype;
|
|
|
+ int nrecl;
|
|
|
+ int nmisses;
|
|
|
|
|
|
- if (mp->m_quotainfo == NULL)
|
|
|
+ if (!q)
|
|
|
return 0;
|
|
|
|
|
|
dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
|
|
|
dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
|
|
|
dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
|
|
|
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
|
|
|
/*
|
|
|
* In the first pass through all incore dquots of this filesystem,
|
|
@@ -578,28 +594,25 @@ xfs_qm_dqpurge_int(
|
|
|
|
|
|
again:
|
|
|
nmisses = 0;
|
|
|
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
|
|
|
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
|
|
|
/*
|
|
|
* Try to get rid of all of the unwanted dquots. The idea is to
|
|
|
* get them off mplist and hashlist, but leave them on freelist.
|
|
|
*/
|
|
|
- dqp = XFS_QI_MPLNEXT(mp);
|
|
|
- while (dqp) {
|
|
|
+ list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
|
|
|
/*
|
|
|
* It's OK to look at the type without taking dqlock here.
|
|
|
* We're holding the mplist lock here, and that's needed for
|
|
|
* a dqreclaim.
|
|
|
*/
|
|
|
- if ((dqp->dq_flags & dqtype) == 0) {
|
|
|
- dqp = dqp->MPL_NEXT;
|
|
|
+ if ((dqp->dq_flags & dqtype) == 0)
|
|
|
continue;
|
|
|
- }
|
|
|
|
|
|
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
|
|
|
- nrecl = XFS_QI_MPLRECLAIMS(mp);
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ nrecl = q->qi_dqreclaims;
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
mutex_lock(&dqp->q_hash->qh_lock);
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
|
|
|
/*
|
|
|
* XXXTheoretically, we can get into a very long
|
|
@@ -607,7 +620,7 @@ xfs_qm_dqpurge_int(
|
|
|
* No one can be adding dquots to the mplist at
|
|
|
* this point, but somebody might be taking things off.
|
|
|
*/
|
|
|
- if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
|
|
|
+ if (nrecl != q->qi_dqreclaims) {
|
|
|
mutex_unlock(&dqp->q_hash->qh_lock);
|
|
|
goto again;
|
|
|
}
|
|
@@ -617,11 +630,9 @@ xfs_qm_dqpurge_int(
|
|
|
* Take the dquot off the mplist and hashlist. It may remain on
|
|
|
* freelist in INACTIVE state.
|
|
|
*/
|
|
|
- nextdqp = dqp->MPL_NEXT;
|
|
|
nmisses += xfs_qm_dqpurge(dqp);
|
|
|
- dqp = nextdqp;
|
|
|
}
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
return nmisses;
|
|
|
}
|
|
|
|
|
@@ -921,12 +932,13 @@ xfs_qm_dqdetach(
|
|
|
|
|
|
int
|
|
|
xfs_qm_sync(
|
|
|
- xfs_mount_t *mp,
|
|
|
- int flags)
|
|
|
+ struct xfs_mount *mp,
|
|
|
+ int flags)
|
|
|
{
|
|
|
- int recl, restarts;
|
|
|
- xfs_dquot_t *dqp;
|
|
|
- int error;
|
|
|
+ struct xfs_quotainfo *q = mp->m_quotainfo;
|
|
|
+ int recl, restarts;
|
|
|
+ struct xfs_dquot *dqp;
|
|
|
+ int error;
|
|
|
|
|
|
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
|
|
return 0;
|
|
@@ -934,18 +946,19 @@ xfs_qm_sync(
|
|
|
restarts = 0;
|
|
|
|
|
|
again:
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
/*
|
|
|
* dqpurge_all() also takes the mplist lock and iterate thru all dquots
|
|
|
* in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
|
|
|
* when we have the mplist lock, we know that dquots will be consistent
|
|
|
* as long as we have it locked.
|
|
|
*/
|
|
|
- if (! XFS_IS_QUOTA_ON(mp)) {
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ if (!XFS_IS_QUOTA_ON(mp)) {
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
return 0;
|
|
|
}
|
|
|
- FOREACH_DQUOT_IN_MP(dqp, mp) {
|
|
|
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
|
|
|
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
|
|
|
/*
|
|
|
* If this is vfs_sync calling, then skip the dquots that
|
|
|
* don't 'seem' to be dirty. ie. don't acquire dqlock.
|
|
@@ -969,7 +982,7 @@ xfs_qm_sync(
|
|
|
}
|
|
|
|
|
|
/* XXX a sentinel would be better */
|
|
|
- recl = XFS_QI_MPLRECLAIMS(mp);
|
|
|
+ recl = q->qi_dqreclaims;
|
|
|
if (!xfs_dqflock_nowait(dqp)) {
|
|
|
if (flags & SYNC_TRYLOCK) {
|
|
|
xfs_dqunlock(dqp);
|
|
@@ -989,7 +1002,7 @@ xfs_qm_sync(
|
|
|
* Let go of the mplist lock. We don't want to hold it
|
|
|
* across a disk write
|
|
|
*/
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
error = xfs_qm_dqflush(dqp, flags);
|
|
|
xfs_dqunlock(dqp);
|
|
|
if (error && XFS_FORCED_SHUTDOWN(mp))
|
|
@@ -997,17 +1010,17 @@ xfs_qm_sync(
|
|
|
else if (error)
|
|
|
return error;
|
|
|
|
|
|
- xfs_qm_mplist_lock(mp);
|
|
|
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
|
|
|
+ mutex_lock(&q->qi_dqlist_lock);
|
|
|
+ if (recl != q->qi_dqreclaims) {
|
|
|
if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
|
|
|
break;
|
|
|
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
goto again;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- xfs_qm_mplist_unlock(mp);
|
|
|
+ mutex_unlock(&q->qi_dqlist_lock);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -1052,8 +1065,9 @@ xfs_qm_init_quotainfo(
|
|
|
return error;
|
|
|
}
|
|
|
|
|
|
- xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
|
|
|
- lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class);
|
|
|
+ INIT_LIST_HEAD(&qinf->qi_dqlist);
|
|
|
+ mutex_init(&qinf->qi_dqlist_lock);
|
|
|
+ lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
|
|
|
|
|
|
qinf->qi_dqreclaims = 0;
|
|
|
|
|
@@ -1150,7 +1164,8 @@ xfs_qm_destroy_quotainfo(
|
|
|
*/
|
|
|
xfs_qm_rele_quotafs_ref(mp);
|
|
|
|
|
|
- xfs_qm_list_destroy(&qi->qi_dqlist);
|
|
|
+ ASSERT(list_empty(&qi->qi_dqlist));
|
|
|
+ mutex_destroy(&qi->qi_dqlist_lock);
|
|
|
|
|
|
if (qi->qi_uquotaip) {
|
|
|
IRELE(qi->qi_uquotaip);
|
|
@@ -1177,7 +1192,7 @@ xfs_qm_list_init(
|
|
|
int n)
|
|
|
{
|
|
|
mutex_init(&list->qh_lock);
|
|
|
- list->qh_next = NULL;
|
|
|
+ INIT_LIST_HEAD(&list->qh_list);
|
|
|
list->qh_version = 0;
|
|
|
list->qh_nelems = 0;
|
|
|
}
|
|
@@ -1316,9 +1331,6 @@ xfs_qm_qino_alloc(
|
|
|
*/
|
|
|
spin_lock(&mp->m_sb_lock);
|
|
|
if (flags & XFS_QMOPT_SBVERSION) {
|
|
|
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
|
|
|
- unsigned oldv = mp->m_sb.sb_versionnum;
|
|
|
-#endif
|
|
|
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
|
|
|
ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
|
|
|
XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
|
|
@@ -1331,11 +1343,6 @@ xfs_qm_qino_alloc(
|
|
|
|
|
|
/* qflags will get updated _after_ quotacheck */
|
|
|
mp->m_sb.sb_qflags = 0;
|
|
|
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
|
|
|
- cmn_err(CE_NOTE,
|
|
|
- "Old superblock version %x, converting to %x.",
|
|
|
- oldv, mp->m_sb.sb_versionnum);
|
|
|
-#endif
|
|
|
}
|
|
|
if (flags & XFS_QMOPT_UQUOTA)
|
|
|
mp->m_sb.sb_uquotino = (*ip)->i_ino;
|
|
@@ -1371,10 +1378,10 @@ xfs_qm_reset_dqcounts(
|
|
|
#ifdef DEBUG
|
|
|
j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
|
|
|
do_div(j, sizeof(xfs_dqblk_t));
|
|
|
- ASSERT(XFS_QM_DQPERBLK(mp) == j);
|
|
|
+ ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
|
|
|
#endif
|
|
|
ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
|
|
|
- for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
|
|
|
+ for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
|
|
|
/*
|
|
|
* Do a sanity check, and if needed, repair the dqblk. Don't
|
|
|
* output any warnings because it's perfectly possible to
|
|
@@ -1429,7 +1436,7 @@ xfs_qm_dqiter_bufs(
|
|
|
while (blkcnt--) {
|
|
|
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
|
|
|
XFS_FSB_TO_DADDR(mp, bno),
|
|
|
- (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
|
|
|
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
|
|
|
if (error)
|
|
|
break;
|
|
|
|
|
@@ -1439,7 +1446,7 @@ xfs_qm_dqiter_bufs(
|
|
|
* goto the next block.
|
|
|
*/
|
|
|
bno++;
|
|
|
- firstid += XFS_QM_DQPERBLK(mp);
|
|
|
+ firstid += mp->m_quotainfo->qi_dqperchunk;
|
|
|
}
|
|
|
return error;
|
|
|
}
|
|
@@ -1505,7 +1512,7 @@ xfs_qm_dqiterate(
|
|
|
continue;
|
|
|
|
|
|
firstid = (xfs_dqid_t) map[i].br_startoff *
|
|
|
- XFS_QM_DQPERBLK(mp);
|
|
|
+ mp->m_quotainfo->qi_dqperchunk;
|
|
|
/*
|
|
|
* Do a read-ahead on the next extent.
|
|
|
*/
|
|
@@ -1516,7 +1523,7 @@ xfs_qm_dqiterate(
|
|
|
while (rablkcnt--) {
|
|
|
xfs_baread(mp->m_ddev_targp,
|
|
|
XFS_FSB_TO_DADDR(mp, rablkno),
|
|
|
- (int)XFS_QI_DQCHUNKLEN(mp));
|
|
|
+ mp->m_quotainfo->qi_dqchunklen);
|
|
|
rablkno++;
|
|
|
}
|
|
|
}
|
|
@@ -1576,8 +1583,10 @@ xfs_qm_quotacheck_dqadjust(
|
|
|
|
|
|
/*
|
|
|
* Set default limits, adjust timers (since we changed usages)
|
|
|
+ *
|
|
|
+ * There are no timers for the default values set in the root dquot.
|
|
|
*/
|
|
|
- if (! XFS_IS_SUSER_DQUOT(dqp)) {
|
|
|
+ if (dqp->q_core.d_id) {
|
|
|
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
|
|
|
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
|
|
|
}
|
|
@@ -1747,14 +1756,14 @@ xfs_qm_quotacheck(
|
|
|
lastino = 0;
|
|
|
flags = 0;
|
|
|
|
|
|
- ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
|
|
|
+ ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
|
|
|
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
|
|
|
|
|
|
/*
|
|
|
* There should be no cached dquots. The (simplistic) quotacheck
|
|
|
* algorithm doesn't like that.
|
|
|
*/
|
|
|
- ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
|
|
|
+ ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
|
|
|
|
|
|
cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
|
|
|
|
|
@@ -1763,15 +1772,19 @@ xfs_qm_quotacheck(
|
|
|
* their counters to zero. We need a clean slate.
|
|
|
* We don't log our changes till later.
|
|
|
*/
|
|
|
- if ((uip = XFS_QI_UQIP(mp))) {
|
|
|
- if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
|
|
|
+ uip = mp->m_quotainfo->qi_uquotaip;
|
|
|
+ if (uip) {
|
|
|
+ error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
|
|
|
+ if (error)
|
|
|
goto error_return;
|
|
|
flags |= XFS_UQUOTA_CHKD;
|
|
|
}
|
|
|
|
|
|
- if ((gip = XFS_QI_GQIP(mp))) {
|
|
|
- if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
|
|
|
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
|
|
|
+ gip = mp->m_quotainfo->qi_gquotaip;
|
|
|
+ if (gip) {
|
|
|
+ error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
|
|
|
+ XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
|
|
|
+ if (error)
|
|
|
goto error_return;
|
|
|
flags |= XFS_OQUOTA_CHKD;
|
|
|
}
|
|
@@ -1804,7 +1817,7 @@ xfs_qm_quotacheck(
|
|
|
* at this point (because we intentionally didn't in dqget_noattach).
|
|
|
*/
|
|
|
if (error) {
|
|
|
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
|
|
|
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
|
|
|
goto error_return;
|
|
|
}
|
|
|
|
|
@@ -1825,7 +1838,7 @@ xfs_qm_quotacheck(
|
|
|
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
|
|
|
mp->m_qflags |= flags;
|
|
|
|
|
|
- XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
|
|
|
+ xfs_qm_dquot_list_print(mp);
|
|
|
|
|
|
error_return:
|
|
|
if (error) {
|
|
@@ -1920,59 +1933,53 @@ xfs_qm_init_quotainos(
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- XFS_QI_UQIP(mp) = uip;
|
|
|
- XFS_QI_GQIP(mp) = gip;
|
|
|
+ mp->m_quotainfo->qi_uquotaip = uip;
|
|
|
+ mp->m_quotainfo->qi_gquotaip = gip;
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
|
|
|
+
|
|
|
/*
|
|
|
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
|
|
|
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
|
|
|
- * favor the lookup function ...
|
|
|
- * XXXsup merge this with qm_reclaim_one().
|
|
|
+ * Just pop the least recently used dquot off the freelist and
|
|
|
+ * recycle it. The returned dquot is locked.
|
|
|
*/
|
|
|
-STATIC int
|
|
|
-xfs_qm_shake_freelist(
|
|
|
- int howmany)
|
|
|
+STATIC xfs_dquot_t *
|
|
|
+xfs_qm_dqreclaim_one(void)
|
|
|
{
|
|
|
- int nreclaimed;
|
|
|
- xfs_dqhash_t *hash;
|
|
|
- xfs_dquot_t *dqp, *nextdqp;
|
|
|
+ xfs_dquot_t *dqpout;
|
|
|
+ xfs_dquot_t *dqp;
|
|
|
int restarts;
|
|
|
- int nflushes;
|
|
|
-
|
|
|
- if (howmany <= 0)
|
|
|
- return 0;
|
|
|
|
|
|
- nreclaimed = 0;
|
|
|
restarts = 0;
|
|
|
- nflushes = 0;
|
|
|
+ dqpout = NULL;
|
|
|
|
|
|
-#ifdef QUOTADEBUG
|
|
|
- cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
|
|
|
-#endif
|
|
|
- /* lock order is : hashchainlock, freelistlock, mplistlock */
|
|
|
- tryagain:
|
|
|
- xfs_qm_freelist_lock(xfs_Gqm);
|
|
|
+ /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
|
|
|
+startagain:
|
|
|
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
|
|
|
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
|
|
|
- ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
|
|
|
- nreclaimed < howmany); ) {
|
|
|
+ list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
|
|
|
+ struct xfs_mount *mp = dqp->q_mount;
|
|
|
xfs_dqlock(dqp);
|
|
|
|
|
|
/*
|
|
|
* We are racing with dqlookup here. Naturally we don't
|
|
|
- * want to reclaim a dquot that lookup wants.
|
|
|
+ * want to reclaim a dquot that lookup wants. We release the
|
|
|
+ * freelist lock and start over, so that lookup will grab
|
|
|
+ * both the dquot and the freelistlock.
|
|
|
*/
|
|
|
if (dqp->dq_flags & XFS_DQ_WANT) {
|
|
|
+ ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
|
|
|
+
|
|
|
+ trace_xfs_dqreclaim_want(dqp);
|
|
|
+
|
|
|
xfs_dqunlock(dqp);
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
|
|
- return nreclaimed;
|
|
|
+ return NULL;
|
|
|
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
|
|
|
- goto tryagain;
|
|
|
+ goto startagain;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1981,23 +1988,27 @@ xfs_qm_shake_freelist(
|
|
|
* life easier.
|
|
|
*/
|
|
|
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
|
|
|
- ASSERT(dqp->q_mount == NULL);
|
|
|
+ ASSERT(mp == NULL);
|
|
|
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
|
|
|
- ASSERT(dqp->HL_PREVP == NULL);
|
|
|
- ASSERT(dqp->MPL_PREVP == NULL);
|
|
|
+ ASSERT(list_empty(&dqp->q_hashlist));
|
|
|
+ ASSERT(list_empty(&dqp->q_mplist));
|
|
|
+ list_del_init(&dqp->q_freelist);
|
|
|
+ xfs_Gqm->qm_dqfrlist_cnt--;
|
|
|
+ xfs_dqunlock(dqp);
|
|
|
+ dqpout = dqp;
|
|
|
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
|
|
|
- nextdqp = dqp->dq_flnext;
|
|
|
- goto off_freelist;
|
|
|
+ break;
|
|
|
}
|
|
|
|
|
|
- ASSERT(dqp->MPL_PREVP);
|
|
|
+ ASSERT(dqp->q_hash);
|
|
|
+ ASSERT(!list_empty(&dqp->q_mplist));
|
|
|
+
|
|
|
/*
|
|
|
* Try to grab the flush lock. If this dquot is in the process of
|
|
|
* getting flushed to disk, we don't want to reclaim it.
|
|
|
*/
|
|
|
if (!xfs_dqflock_nowait(dqp)) {
|
|
|
xfs_dqunlock(dqp);
|
|
|
- dqp = dqp->dq_flnext;
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -2010,21 +2021,21 @@ xfs_qm_shake_freelist(
|
|
|
if (XFS_DQ_IS_DIRTY(dqp)) {
|
|
|
int error;
|
|
|
|
|
|
- trace_xfs_dqshake_dirty(dqp);
|
|
|
+ trace_xfs_dqreclaim_dirty(dqp);
|
|
|
|
|
|
/*
|
|
|
* We flush it delayed write, so don't bother
|
|
|
- * releasing the mplock.
|
|
|
+ * releasing the freelist lock.
|
|
|
*/
|
|
|
error = xfs_qm_dqflush(dqp, 0);
|
|
|
if (error) {
|
|
|
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
|
|
|
- "xfs_qm_dqflush_all: dquot %p flush failed", dqp);
|
|
|
+ xfs_fs_cmn_err(CE_WARN, mp,
|
|
|
+ "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
|
|
|
}
|
|
|
xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
|
|
|
- dqp = dqp->dq_flnext;
|
|
|
continue;
|
|
|
}
|
|
|
+
|
|
|
/*
|
|
|
* We're trying to get the hashlock out of order. This races
|
|
|
* with dqlookup; so, we giveup and goto the next dquot if
|
|
@@ -2033,56 +2044,74 @@ xfs_qm_shake_freelist(
|
|
|
* waiting for the freelist lock.
|
|
|
*/
|
|
|
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
|
|
|
- xfs_dqfunlock(dqp);
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- dqp = dqp->dq_flnext;
|
|
|
- continue;
|
|
|
+ restarts++;
|
|
|
+ goto dqfunlock;
|
|
|
}
|
|
|
+
|
|
|
/*
|
|
|
* This races with dquot allocation code as well as dqflush_all
|
|
|
* and reclaim code. So, if we failed to grab the mplist lock,
|
|
|
* giveup everything and start over.
|
|
|
*/
|
|
|
- hash = dqp->q_hash;
|
|
|
- ASSERT(hash);
|
|
|
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
|
|
|
- /* XXX put a sentinel so that we can come back here */
|
|
|
+ if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
|
|
|
+ restarts++;
|
|
|
+ mutex_unlock(&dqp->q_hash->qh_lock);
|
|
|
xfs_dqfunlock(dqp);
|
|
|
xfs_dqunlock(dqp);
|
|
|
- mutex_unlock(&hash->qh_lock);
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
|
|
- return nreclaimed;
|
|
|
- goto tryagain;
|
|
|
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
+ if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
|
|
+ return NULL;
|
|
|
+ goto startagain;
|
|
|
}
|
|
|
|
|
|
- trace_xfs_dqshake_unlink(dqp);
|
|
|
-
|
|
|
-#ifdef QUOTADEBUG
|
|
|
- cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
|
|
|
- dqp, be32_to_cpu(dqp->q_core.d_id));
|
|
|
-#endif
|
|
|
ASSERT(dqp->q_nrefs == 0);
|
|
|
- nextdqp = dqp->dq_flnext;
|
|
|
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
|
|
|
- XQM_HASHLIST_REMOVE(hash, dqp);
|
|
|
+ list_del_init(&dqp->q_mplist);
|
|
|
+ mp->m_quotainfo->qi_dquots--;
|
|
|
+ mp->m_quotainfo->qi_dqreclaims++;
|
|
|
+ list_del_init(&dqp->q_hashlist);
|
|
|
+ dqp->q_hash->qh_version++;
|
|
|
+ list_del_init(&dqp->q_freelist);
|
|
|
+ xfs_Gqm->qm_dqfrlist_cnt--;
|
|
|
+ dqpout = dqp;
|
|
|
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
|
|
|
+ mutex_unlock(&dqp->q_hash->qh_lock);
|
|
|
+dqfunlock:
|
|
|
xfs_dqfunlock(dqp);
|
|
|
- xfs_qm_mplist_unlock(dqp->q_mount);
|
|
|
- mutex_unlock(&hash->qh_lock);
|
|
|
-
|
|
|
- off_freelist:
|
|
|
- XQM_FREELIST_REMOVE(dqp);
|
|
|
xfs_dqunlock(dqp);
|
|
|
- nreclaimed++;
|
|
|
- XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
|
|
|
+ if (dqpout)
|
|
|
+ break;
|
|
|
+ if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
|
|
|
+ return dqpout;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
|
|
|
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
|
|
|
+ * favor the lookup function ...
|
|
|
+ */
|
|
|
+STATIC int
|
|
|
+xfs_qm_shake_freelist(
|
|
|
+ int howmany)
|
|
|
+{
|
|
|
+ int nreclaimed = 0;
|
|
|
+ xfs_dquot_t *dqp;
|
|
|
+
|
|
|
+ if (howmany <= 0)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ while (nreclaimed < howmany) {
|
|
|
+ dqp = xfs_qm_dqreclaim_one();
|
|
|
+ if (!dqp)
|
|
|
+ return nreclaimed;
|
|
|
xfs_qm_dqdestroy(dqp);
|
|
|
- dqp = nextdqp;
|
|
|
+ nreclaimed++;
|
|
|
}
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
return nreclaimed;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
/*
|
|
|
* The kmem_shake interface is invoked when memory is running low.
|
|
|
*/
|
|
@@ -2097,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
|
|
|
if (!xfs_Gqm)
|
|
|
return 0;
|
|
|
|
|
|
- nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
|
|
|
+ nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
|
|
|
/* incore dquots in all f/s's */
|
|
|
ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
|
|
|
|
|
@@ -2113,131 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
|
|
|
}
|
|
|
|
|
|
|
|
|
-/*
|
|
|
- * Just pop the least recently used dquot off the freelist and
|
|
|
- * recycle it. The returned dquot is locked.
|
|
|
- */
|
|
|
-STATIC xfs_dquot_t *
|
|
|
-xfs_qm_dqreclaim_one(void)
|
|
|
-{
|
|
|
- xfs_dquot_t *dqpout;
|
|
|
- xfs_dquot_t *dqp;
|
|
|
- int restarts;
|
|
|
- int nflushes;
|
|
|
-
|
|
|
- restarts = 0;
|
|
|
- dqpout = NULL;
|
|
|
- nflushes = 0;
|
|
|
-
|
|
|
- /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
|
|
|
- startagain:
|
|
|
- xfs_qm_freelist_lock(xfs_Gqm);
|
|
|
-
|
|
|
- FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
|
|
|
- xfs_dqlock(dqp);
|
|
|
-
|
|
|
- /*
|
|
|
- * We are racing with dqlookup here. Naturally we don't
|
|
|
- * want to reclaim a dquot that lookup wants. We release the
|
|
|
- * freelist lock and start over, so that lookup will grab
|
|
|
- * both the dquot and the freelistlock.
|
|
|
- */
|
|
|
- if (dqp->dq_flags & XFS_DQ_WANT) {
|
|
|
- ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
|
|
|
-
|
|
|
- trace_xfs_dqreclaim_want(dqp);
|
|
|
-
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
|
|
- return NULL;
|
|
|
- XQM_STATS_INC(xqmstats.xs_qm_dqwants);
|
|
|
- goto startagain;
|
|
|
- }
|
|
|
-
|
|
|
- /*
|
|
|
- * If the dquot is inactive, we are assured that it is
|
|
|
- * not on the mplist or the hashlist, and that makes our
|
|
|
- * life easier.
|
|
|
- */
|
|
|
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
|
|
|
- ASSERT(dqp->q_mount == NULL);
|
|
|
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
|
|
|
- ASSERT(dqp->HL_PREVP == NULL);
|
|
|
- ASSERT(dqp->MPL_PREVP == NULL);
|
|
|
- XQM_FREELIST_REMOVE(dqp);
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- dqpout = dqp;
|
|
|
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- ASSERT(dqp->q_hash);
|
|
|
- ASSERT(dqp->MPL_PREVP);
|
|
|
-
|
|
|
- /*
|
|
|
- * Try to grab the flush lock. If this dquot is in the process of
|
|
|
- * getting flushed to disk, we don't want to reclaim it.
|
|
|
- */
|
|
|
- if (!xfs_dqflock_nowait(dqp)) {
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- continue;
|
|
|
- }
|
|
|
-
|
|
|
- /*
|
|
|
- * We have the flush lock so we know that this is not in the
|
|
|
- * process of being flushed. So, if this is dirty, flush it
|
|
|
- * DELWRI so that we don't get a freelist infested with
|
|
|
- * dirty dquots.
|
|
|
- */
|
|
|
- if (XFS_DQ_IS_DIRTY(dqp)) {
|
|
|
- int error;
|
|
|
-
|
|
|
- trace_xfs_dqreclaim_dirty(dqp);
|
|
|
-
|
|
|
- /*
|
|
|
- * We flush it delayed write, so don't bother
|
|
|
- * releasing the freelist lock.
|
|
|
- */
|
|
|
- error = xfs_qm_dqflush(dqp, 0);
|
|
|
- if (error) {
|
|
|
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
|
|
|
- "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
|
|
|
- }
|
|
|
- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
|
|
|
- continue;
|
|
|
- }
|
|
|
-
|
|
|
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
|
|
|
- xfs_dqfunlock(dqp);
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- continue;
|
|
|
- }
|
|
|
-
|
|
|
- if (!mutex_trylock(&dqp->q_hash->qh_lock))
|
|
|
- goto mplistunlock;
|
|
|
-
|
|
|
- trace_xfs_dqreclaim_unlink(dqp);
|
|
|
-
|
|
|
- ASSERT(dqp->q_nrefs == 0);
|
|
|
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
|
|
|
- XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
|
|
|
- XQM_FREELIST_REMOVE(dqp);
|
|
|
- dqpout = dqp;
|
|
|
- mutex_unlock(&dqp->q_hash->qh_lock);
|
|
|
- mplistunlock:
|
|
|
- xfs_qm_mplist_unlock(dqp->q_mount);
|
|
|
- xfs_dqfunlock(dqp);
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- if (dqpout)
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- xfs_qm_freelist_unlock(xfs_Gqm);
|
|
|
- return dqpout;
|
|
|
-}
|
|
|
-
|
|
|
-
|
|
|
/*------------------------------------------------------------------*/
|
|
|
|
|
|
/*
|
|
@@ -2662,66 +2566,3 @@ xfs_qm_vop_create_dqattach(
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-/* ------------- list stuff -----------------*/
|
|
|
-STATIC void
|
|
|
-xfs_qm_freelist_init(xfs_frlist_t *ql)
|
|
|
-{
|
|
|
- ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
|
|
|
- mutex_init(&ql->qh_lock);
|
|
|
- ql->qh_version = 0;
|
|
|
- ql->qh_nelems = 0;
|
|
|
-}
|
|
|
-
|
|
|
-STATIC void
|
|
|
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
|
|
|
-{
|
|
|
- xfs_dquot_t *dqp, *nextdqp;
|
|
|
-
|
|
|
- mutex_lock(&ql->qh_lock);
|
|
|
- for (dqp = ql->qh_next;
|
|
|
- dqp != (xfs_dquot_t *)ql; ) {
|
|
|
- xfs_dqlock(dqp);
|
|
|
- nextdqp = dqp->dq_flnext;
|
|
|
-#ifdef QUOTADEBUG
|
|
|
- cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
|
|
|
-#endif
|
|
|
- XQM_FREELIST_REMOVE(dqp);
|
|
|
- xfs_dqunlock(dqp);
|
|
|
- xfs_qm_dqdestroy(dqp);
|
|
|
- dqp = nextdqp;
|
|
|
- }
|
|
|
- mutex_unlock(&ql->qh_lock);
|
|
|
- mutex_destroy(&ql->qh_lock);
|
|
|
-
|
|
|
- ASSERT(ql->qh_nelems == 0);
|
|
|
-}
|
|
|
-
|
|
|
-STATIC void
|
|
|
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
|
|
|
-{
|
|
|
- dq->dq_flnext = ql->qh_next;
|
|
|
- dq->dq_flprev = (xfs_dquot_t *)ql;
|
|
|
- ql->qh_next = dq;
|
|
|
- dq->dq_flnext->dq_flprev = dq;
|
|
|
- xfs_Gqm->qm_dqfreelist.qh_nelems++;
|
|
|
- xfs_Gqm->qm_dqfreelist.qh_version++;
|
|
|
-}
|
|
|
-
|
|
|
-void
|
|
|
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
|
|
|
-{
|
|
|
- xfs_dquot_t *next = dq->dq_flnext;
|
|
|
- xfs_dquot_t *prev = dq->dq_flprev;
|
|
|
-
|
|
|
- next->dq_flprev = prev;
|
|
|
- prev->dq_flnext = next;
|
|
|
- dq->dq_flnext = dq->dq_flprev = dq;
|
|
|
- xfs_Gqm->qm_dqfreelist.qh_nelems--;
|
|
|
- xfs_Gqm->qm_dqfreelist.qh_version++;
|
|
|
-}
|
|
|
-
|
|
|
-void
|
|
|
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
|
|
|
-{
|
|
|
- xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
|
|
|
-}
|