@@ -50,6 +50,8 @@
 #include <linux/freezer.h>
 #include <linux/quota.h>
 #include <linux/dqblk_xfs.h>
+#include <linux/lockref.h>
+#include <linux/list_lru.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -71,29 +73,25 @@ struct gfs2_quota_change_host {
 	struct kqid qc_id;
 };
 
-static LIST_HEAD(qd_lru_list);
-static atomic_t qd_lru_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(qd_lru_lock);
+/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
+static DEFINE_SPINLOCK(qd_lock);
+struct list_lru gfs2_qd_lru;
 
-unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
-				  struct shrink_control *sc)
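+/*
+ * gfs2_qd_dispose - free a list of dead quota data objects
+ * @list: dispose list assembled by gfs2_qd_isolate()
+ *
+ * Each qd on @list has already been taken off the lru and marked
+ * dead, so it can be unlinked from the per-sb quota list (under
+ * qd_lock) and freed without holding any lru lock.
+ */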
+static void gfs2_qd_dispose(struct list_head *list)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
-	int nr_to_scan = sc->nr_to_scan;
-	long freed = 0;
 
-	if (!(sc->gfp_mask & __GFP_FS))
-		return SHRINK_STOP;
-
-	spin_lock(&qd_lru_lock);
-	while (nr_to_scan && !list_empty(&qd_lru_list)) {
-		qd = list_entry(qd_lru_list.next,
-				struct gfs2_quota_data, qd_reclaim);
+	while (!list_empty(list)) {
+		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
 		sdp = qd->qd_gl->gl_sbd;
 
+		list_del(&qd->qd_lru);
+
 		/* Free from the filesystem-specific list */
+		spin_lock(&qd_lock);
 		list_del(&qd->qd_list);
+		spin_unlock(&qd_lock);
 
 		gfs2_assert_warn(sdp, !qd->qd_change);
 		gfs2_assert_warn(sdp, !qd->qd_slot_count);
@@ -103,24 +101,59 @@ unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 		atomic_dec(&sdp->sd_quota_count);
 
 		/* Delete it from the common reclaim list */
-		list_del_init(&qd->qd_reclaim);
-		atomic_dec(&qd_lru_count);
-		spin_unlock(&qd_lru_lock);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
-		spin_lock(&qd_lru_lock);
-		nr_to_scan--;
-		freed++;
 	}
-	spin_unlock(&qd_lru_lock);
+}
+
+
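+/*
+ * gfs2_qd_isolate - lru walk callback for the quota shrinker
+ *
+ * Under the lru lock, move a qd with no remaining references onto
+ * the private dispose list and mark its lockref dead. Entries whose
+ * lockref spinlock cannot be taken are skipped and stay on the lru.
+ */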
+static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
+{
+	struct list_head *dispose = arg;
+	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+
+	if (!spin_trylock(&qd->qd_lockref.lock))
+		return LRU_SKIP;
+
+	if (qd->qd_lockref.count == 0) {
+		lockref_mark_dead(&qd->qd_lockref);
+		list_move(&qd->qd_lru, dispose);
+	}
+
+	spin_unlock(&qd->qd_lockref.lock);
+	return LRU_REMOVED;
+}
+
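+/*
+ * gfs2_qd_shrink_scan - scan callback for the quota shrinker
+ *
+ * Isolates unused qd objects from this node's lru onto a local
+ * dispose list, then frees them outside the lru lock. Bails out
+ * with SHRINK_STOP when the allocation context forbids fs recursion.
+ */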
+static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+					 struct shrink_control *sc)
+{
+	LIST_HEAD(dispose);
+	unsigned long freed;
+
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
+
+	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
+				   &dispose, &sc->nr_to_scan);
+
+	gfs2_qd_dispose(&dispose);
+
 	return freed;
 }
 
-unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
-				   struct shrink_control *sc)
+static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+					  struct shrink_control *sc)
 {
-	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
+	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
 }
 
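+/*
+ * With SHRINKER_NUMA_AWARE set, count_objects and scan_objects are
+ * invoked per NUMA node (sc->nid), matching the per-node lists of
+ * gfs2_qd_lru.
+ */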
+struct shrinker gfs2_qd_shrinker = {
+	.count_objects = gfs2_qd_shrink_count,
+	.scan_objects = gfs2_qd_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+	.flags = SHRINKER_NUMA_AWARE,
+};
+
+
 static u64 qd2index(struct gfs2_quota_data *qd)
 {
 	struct kqid qid = qd->qd_id;
@@ -148,10 +181,11 @@ static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
 	if (!qd)
 		return -ENOMEM;
 
-	atomic_set(&qd->qd_count, 1);
+	qd->qd_lockref.count = 1;
+	spin_lock_init(&qd->qd_lockref.lock);
 	qd->qd_id = qid;
 	qd->qd_slot = -1;
-	INIT_LIST_HEAD(&qd->qd_reclaim);
+	INIT_LIST_HEAD(&qd->qd_lru);
 
 	error = gfs2_glock_get(sdp, qd2index(qd),
 			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -177,16 +211,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 
 	for (;;) {
 		found = 0;
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-			if (qid_eq(qd->qd_id, qid)) {
-				if (!atomic_read(&qd->qd_count) &&
-				    !list_empty(&qd->qd_reclaim)) {
-					/* Remove it from reclaim list */
-					list_del_init(&qd->qd_reclaim);
-					atomic_dec(&qd_lru_count);
-				}
-				atomic_inc(&qd->qd_count);
+			if (qid_eq(qd->qd_id, qid) &&
+			    lockref_get_not_dead(&qd->qd_lockref)) {
+				list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 				found = 1;
 				break;
 			}
@@ -202,7 +231,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 		new_qd = NULL;
 	}
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (qd) {
 		if (new_qd) {
@@ -222,18 +251,19 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 static void qd_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-	gfs2_assert(sdp, atomic_read(&qd->qd_count));
-	atomic_inc(&qd->qd_count);
+	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
+	lockref_get(&qd->qd_lockref);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
-		/* Add to the reclaim list */
-		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
-		atomic_inc(&qd_lru_count);
-		spin_unlock(&qd_lru_lock);
-	}
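+	/*
+	 * lockref_put_or_lock() drops a reference unless it is the
+	 * last one; in that case it returns zero with the count still
+	 * at 1 and qd->qd_lockref.lock held, so the final put below
+	 * can zero the count and park the qd on the lru.
+	 */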
+	if (lockref_put_or_lock(&qd->qd_lockref))
+		return;
+
+	qd->qd_lockref.count = 0;
+	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+	spin_unlock(&qd->qd_lockref.lock);
+
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
@@ -242,10 +272,10 @@ static int slot_get(struct gfs2_quota_data *qd)
 	unsigned int c, o = 0, b;
 	unsigned char byte = 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	if (qd->qd_slot_count++) {
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 		return 0;
 	}
 
@@ -269,13 +299,13 @@ found:
 
 	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	return 0;
 
 fail:
 	qd->qd_slot_count--;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 	return -ENOSPC;
 }
 
@@ -283,23 +313,43 @@ static void slot_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
+}
+
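+/*
+ * gfs2_icbit_munge - set or clear one bit in a paged bitmap
+ * @bitmap: an array of PAGE_SIZE-sized bitmap chunks
+ * @bit: the bit number within the whole bitmap
+ * @new_value: non-zero to set the bit, zero to clear it
+ *
+ * Asserts (withdrawing on failure) that the bit actually changes.
+ */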
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+			     unsigned int bit, int new_value)
+{
+	unsigned int c, o, b = bit;
+	int old_value;
+
+	c = b / (8 * PAGE_SIZE);
+	b %= 8 * PAGE_SIZE;
+	o = b / 8;
+	b %= 8;
+
+	old_value = (bitmap[c][o] & (1 << b));
+	gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+	if (new_value)
+		bitmap[c][o] |= 1 << b;
+	else
+		bitmap[c][o] &= ~(1 << b);
 }
 
 static void slot_put(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	if (!--qd->qd_slot_count) {
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 		qd->qd_slot = -1;
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 }
 
 static int bh_get(struct gfs2_quota_data *qd)
@@ -363,6 +413,25 @@ static void bh_put(struct gfs2_quota_data *qd)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
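+/*
+ * qd_check_sync - check if a qd should be synced, claiming it if so
+ * @sync_gen: sync generation to compare against, or NULL to skip
+ *	      the generation check
+ *
+ * Returns 1 with QDF_LOCKED set and an extra qd ref and slot ref
+ * held, or 0 if this qd does not need syncing. Caller must hold
+ * qd_lock.
+ */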
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+			 u64 *sync_gen)
+{
+	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+		return 0;
+
+	if (!lockref_get_not_dead(&qd->qd_lockref))
+		return 0;
+
+	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+	set_bit(QDF_LOCKED, &qd->qd_flags);
+	qd->qd_change_sync = qd->qd_change;
+	gfs2_assert_warn(sdp, qd->qd_slot_count);
+	qd->qd_slot_count++;
+	return 1;
+}
+
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd = NULL;
@@ -374,31 +443,18 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
-		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
-			continue;
-
-		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-		set_bit(QDF_LOCKED, &qd->qd_flags);
-		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-		atomic_inc(&qd->qd_count);
-		qd->qd_change_sync = qd->qd_change;
-		gfs2_assert_warn(sdp, qd->qd_slot_count);
-		qd->qd_slot_count++;
-		found = 1;
-
-		break;
+		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+		if (found)
+			break;
 	}
 
 	if (!found)
 		qd = NULL;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (qd) {
 		gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -416,43 +472,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	return 0;
 }
 
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-	if (sdp->sd_vfs->s_flags & MS_RDONLY)
-		return 0;
-
-	spin_lock(&qd_lru_lock);
-
-	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
-		spin_unlock(&qd_lru_lock);
-		return 0;
-	}
-
-	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-	set_bit(QDF_LOCKED, &qd->qd_flags);
-	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-	atomic_inc(&qd->qd_count);
-	qd->qd_change_sync = qd->qd_change;
-	gfs2_assert_warn(sdp, qd->qd_slot_count);
-	qd->qd_slot_count++;
-
-	spin_unlock(&qd_lru_lock);
-
-	gfs2_assert_warn(sdp, qd->qd_change_sync);
-	if (bh_get(qd)) {
-		clear_bit(QDF_LOCKED, &qd->qd_flags);
-		slot_put(qd);
-		qd_put(qd);
-		return 0;
-	}
-
-	return 1;
-}
-
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
 	gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -602,9 +621,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	x = be64_to_cpu(qc->qc_change) + change;
 	qc->qc_change = cpu_to_be64(x);
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	qd->qd_change = x;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (!x) {
 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -763,6 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
 	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks, ind_blocks;
 	struct gfs2_holder *ghs, i_gh;
 	unsigned int qx, x;
@@ -815,7 +835,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
 	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
-	error = gfs2_inplace_reserve(ip, reserved, 0);
+	ap.target = reserved;
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_alloc;
 
@@ -974,9 +995,9 @@ static int need_sync(struct gfs2_quota_data *qd)
 	if (!qd->qd_qb.qb_limit)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	value = qd->qd_change;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	spin_lock(&gt->gt_spin);
 	num = gt->gt_quota_scale_num;
@@ -1001,9 +1022,11 @@ static int need_sync(struct gfs2_quota_data *qd)
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_quota_data *qda[4];
 	unsigned int count = 0;
 	unsigned int x;
+	int found;
 
 	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
 		goto out;
@@ -1016,9 +1039,25 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
 		sync = need_sync(qd);
 
 		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+		if (!sync)
+			continue;
+
+		spin_lock(&qd_lock);
+		found = qd_check_sync(sdp, qd, NULL);
+		spin_unlock(&qd_lock);
+
+		if (!found)
+			continue;
 
-		if (sync && qd_trylock(qd))
-			qda[count++] = qd;
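+		/* If bh_get fails, undo the refs taken by qd_check_sync() */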
+		gfs2_assert_warn(sdp, qd->qd_change_sync);
+		if (bh_get(qd)) {
+			clear_bit(QDF_LOCKED, &qd->qd_flags);
+			slot_put(qd);
+			qd_put(qd);
+			continue;
+		}
+
+		qda[count++] = qd;
 	}
 
 	if (count) {
@@ -1067,9 +1106,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 			continue;
 
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		value += qd->qd_change;
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
 			print_message(qd, "exceeded");
@@ -1118,17 +1157,18 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_data **qda;
-	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
 	unsigned int num_qd;
 	unsigned int x;
 	int error = 0;
 
-	sdp->sd_quota_sync_gen++;
-
 	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
 	if (!qda)
 		return -ENOMEM;
 
+	mutex_lock(&sdp->sd_quota_sync_mutex);
+	sdp->sd_quota_sync_gen++;
+
 	do {
 		num_qd = 0;
 
@@ -1153,6 +1193,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 		}
 	} while (!error && num_qd == max_qd);
 
+	mutex_unlock(&sdp->sd_quota_sync_mutex);
 	kfree(qda);
 
 	return error;
@@ -1258,11 +1299,11 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 		qd->qd_slot = slot;
 		qd->qd_slot_count = 1;
 
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
 		list_add(&qd->qd_list, &sdp->sd_quota_list);
 		atomic_inc(&sdp->sd_quota_count);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		found++;
 	}
@@ -1288,30 +1329,34 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-		if (atomic_read(&qd->qd_count) > 1 ||
-		    (atomic_read(&qd->qd_count) &&
-		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+		/*
+		 * To be removed in due course... we should be able to
+		 * ensure that all refs to the qd have been dropped by
+		 * this point, making this rather odd test unnecessary
+		 */
+		spin_lock(&qd->qd_lockref.lock);
+		if (qd->qd_lockref.count > 1 ||
+		    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+			spin_unlock(&qd->qd_lockref.lock);
 			list_move(&qd->qd_list, head);
-			spin_unlock(&qd_lru_lock);
+			spin_unlock(&qd_lock);
 			schedule();
-			spin_lock(&qd_lru_lock);
+			spin_lock(&qd_lock);
 			continue;
 		}
+		spin_unlock(&qd->qd_lockref.lock);
 
 		list_del(&qd->qd_list);
 		/* Also remove if this qd exists in the reclaim list */
-		if (!list_empty(&qd->qd_reclaim)) {
-			list_del_init(&qd->qd_reclaim);
-			atomic_dec(&qd_lru_count);
-		}
+		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 		atomic_dec(&sdp->sd_quota_count);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
-		if (!atomic_read(&qd->qd_count)) {
+		if (!qd->qd_lockref.count) {
 			gfs2_assert_warn(sdp, !qd->qd_change);
 			gfs2_assert_warn(sdp, !qd->qd_slot_count);
 		} else
@@ -1321,9 +1366,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		gfs2_glock_put(qd->qd_gl);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
@@ -1462,7 +1507,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 	}
 	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 	fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
-	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
+	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
 	return 0;
 }
 
@@ -1573,10 +1618,12 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 	if (gfs2_is_stuffed(ip))
 		alloc_required = 1;
 	if (alloc_required) {
+		struct gfs2_alloc_parms ap = { .aflags = 0, };
 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
 		blocks = 1 + data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip, blocks, 0);
+		ap.target = blocks;
+		error = gfs2_inplace_reserve(ip, &ap);
 		if (error)
 			goto out_i;
 		blocks += gfs2_rg_blocks(ip, blocks);