@@ -952,6 +952,7 @@ struct ext4_sb_info {
 	atomic_t s_mb_lost_chunks;
 	atomic_t s_mb_preallocated;
 	atomic_t s_mb_discarded;
+	atomic_t s_lock_busy;
 
 	/* locality groups */
 	struct ext4_locality_group *s_locality_groups;
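(Editorial note on the counter semantics, not part of the patch: the s_lock_busy field added above is maintained as a saturating counter by ext4_lock_group() in the next hunk. atomic_add_unless(v, a, u) adds a to v only while v != u, which clamps the counter to the range [0, EXT4_MAX_CONTENTION] without taking any extra lock. A minimal user-space analogue of that primitive using C11 atomics, as an illustrative sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Add 'a' to '*v' unless '*v' currently equals 'u'; returns true if the
 * add happened. Mirrors the semantics of the kernel's atomic_add_unless(). */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* On failure, the CAS reloads 'c' with the current value. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}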
@@ -1593,15 +1594,42 @@ struct ext4_group_info {
 #define EXT4_MB_GRP_NEED_INIT(grp)	\
 	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
 
+#define EXT4_MAX_CONTENTION		8
+#define EXT4_CONTENTION_THRESHOLD	2
+
 static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
 					      ext4_group_t group)
 {
 	return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
 }
 
+/*
+ * Returns true if the filesystem is busy enough that attempts to
+ * access the block group locks have run into contention.
+ */
+static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
+{
+	return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
+}
+
 static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
 {
-	spin_lock(ext4_group_lock_ptr(sb, group));
+	spinlock_t *lock = ext4_group_lock_ptr(sb, group);
+	if (spin_trylock(lock))
+		/*
+		 * We're able to grab the lock right away, so drop the
+		 * lock contention counter.
+		 */
+		atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
+	else {
+		/*
+		 * The lock is busy, so bump the contention counter,
+		 * and then wait on the spin lock.
+		 */
+		atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
+				  EXT4_MAX_CONTENTION);
+		spin_lock(lock);
+	}
 }
 
 static inline void ext4_unlock_group(struct super_block *sb,
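(Again as commentary, not part of the patch: the counter only pays off if some caller consults ext4_fs_is_busy() and backs off when group locks are contended; in ext4 the natural consumer is the multiblock allocator. A self-contained pthread sketch of the overall pattern this hunk implements — trylock, saturating feedback counter, busy predicate — with all names illustrative rather than kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_CONTENTION		8
#define CONTENTION_THRESHOLD	2

static atomic_int lock_busy;	/* analogue of sbi->s_lock_busy */

/* Same helper as sketched after the first hunk, repeated here so the
 * example stands alone. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u)
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	return false;
}

static bool fs_is_busy(void)
{
	return atomic_load(&lock_busy) > CONTENTION_THRESHOLD;
}

static void lock_group(pthread_spinlock_t *lock)
{
	if (pthread_spin_trylock(lock) == 0) {
		/* Uncontended acquisition: let the counter decay. */
		add_unless(&lock_busy, -1, 0);
	} else {
		/* Contended: bump the counter, then wait for the lock. */
		add_unless(&lock_busy, 1, MAX_CONTENTION);
		pthread_spin_lock(lock);
	}
}

The design point is that contention is sampled for free on every acquisition attempt: an uncontended trylock decays the counter and a failed one bumps it, so fs_is_busy() tracks recent lock behavior without any dedicated measurement pass.)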