@@ -25,7 +25,7 @@
  * case write_inode() functions do sync_dirty_buffer() and thus effectively
  * write one block at a time.
  */
-static int __fsync_super(struct super_block *sb, int wait)
+static int __sync_filesystem(struct super_block *sb, int wait)
 {
 	vfs_dq_sync(sb);
 	sync_inodes_sb(sb, wait);
@@ -43,16 +43,16 @@ static int __fsync_super(struct super_block *sb, int wait)
  * superblock. Filesystem data as well as the underlying block
  * device. Takes the superblock lock.
  */
-int fsync_super(struct super_block *sb)
+int sync_filesystem(struct super_block *sb)
 {
 	int ret;
 
-	ret = __fsync_super(sb, 0);
+	ret = __sync_filesystem(sb, 0);
 	if (ret < 0)
 		return ret;
-	return __fsync_super(sb, 1);
+	return __sync_filesystem(sb, 1);
 }
-EXPORT_SYMBOL_GPL(fsync_super);
+EXPORT_SYMBOL_GPL(sync_filesystem);
 
 /*
  * Sync all the data for all the filesystems (called by sys_sync() and
@@ -92,7 +92,7 @@ restart:
 		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
 		if (sb->s_root)
-			__fsync_super(sb, wait);
+			__sync_filesystem(sb, wait);
 		up_read(&sb->s_umount);
 		/* restart only when sb is no longer on the list */
 		spin_lock(&sb_lock);
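
For context, an illustrative caller sketch (not part of the patch): only sync_filesystem() comes from the change above; the helper name and flag handling are assumptions. A filesystem going read-only would flush everything first, relying on sync_filesystem() doing a non-waiting writeback pass followed by a waiting one.

	/* Example only: flush a filesystem before dropping write access. */
	static int example_remount_ro(struct super_block *sb)
	{
		int err;

		/*
		 * sync_filesystem() calls __sync_filesystem(sb, 0) and then
		 * __sync_filesystem(sb, 1): kick off asynchronous writeback,
		 * then wait for it to complete.
		 */
		err = sync_filesystem(sb);
		if (err < 0)
			return err;

		sb->s_flags |= MS_RDONLY;	/* assumed flag handling for the sketch */
		return 0;
	}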