@@ -652,18 +652,6 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 	sync_sb_inodes(sb, &wbc);
 }
 
-/*
- * Rather lame livelock avoidance.
- */
-static void set_sb_syncing(int val)
-{
-	struct super_block *sb;
-	spin_lock(&sb_lock);
-	list_for_each_entry_reverse(sb, &super_blocks, s_list)
-		sb->s_syncing = val;
-	spin_unlock(&sb_lock);
-}
-
 /**
  * sync_inodes - writes all inodes to disk
  * @wait: wait for completion
@@ -690,9 +678,6 @@ static void __sync_inodes(int wait)
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (sb->s_syncing)
-			continue;
-		sb->s_syncing = 1;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
@@ -710,13 +695,10 @@ restart:
 
 void sync_inodes(int wait)
 {
-	set_sb_syncing(0);
 	__sync_inodes(0);
 
-	if (wait) {
-		set_sb_syncing(0);
+	if (wait)
 		__sync_inodes(1);
-	}
 }
 
 /**