@@ -159,31 +159,6 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-static void __fsync_super(struct super_block *sb)
-{
-	sync_inodes_sb(sb, 0);
-	DQUOT_SYNC(sb);
-	lock_super(sb);
-	if (sb->s_dirt && sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, 1);
-	sync_blockdev(sb->s_bdev);
-	sync_inodes_sb(sb, 1);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-	__fsync_super(sb);
-	return sync_blockdev(sb->s_bdev);
-}
-
 /*
  * Write out and wait upon all dirty data associated with this
  * device.  Filesystem data as well as the underlying block
@@ -259,118 +234,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 }
 EXPORT_SYMBOL(thaw_bdev);
 
-/*
- * sync everything.  Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-	wakeup_pdflush(0);
-	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
-	DQUOT_SYNC(NULL);
-	sync_supers();		/* Write the superblocks */
-	sync_filesystems(0);	/* Start syncing the filesystems */
-	sync_filesystems(wait);	/* Waitingly sync the filesystems */
-	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
-	if (!wait)
-		printk("Emergency Sync complete\n");
-	if (unlikely(laptop_mode))
-		laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-	do_sync(1);
-	return 0;
-}
-
-void emergency_sync(void)
-{
-	pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-	struct inode * inode = dentry->d_inode;
-	struct super_block * sb;
-	int ret, err;
-
-	/* sync the inode to buffers */
-	ret = write_inode_now(inode, 0);
-
-	/* sync the superblock to buffers */
-	sb = inode->i_sb;
-	lock_super(sb);
-	if (sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-
-	/* .. finally sync the buffers to disk */
-	err = sync_blockdev(sb->s_bdev);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-long do_fsync(struct file *file, int datasync)
-{
-	int ret;
-	int err;
-	struct address_space *mapping = file->f_mapping;
-
-	if (!file->f_op || !file->f_op->fsync) {
-		/* Why?  We can still call filemap_fdatawrite */
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = filemap_fdatawrite(mapping);
-
-	/*
-	 * We need to protect against concurrent writers, which could cause
-	 * livelocks in fsync_buffers_list().
-	 */
-	mutex_lock(&mapping->host->i_mutex);
-	err = file->f_op->fsync(file, file->f_dentry, datasync);
-	if (!ret)
-		ret = err;
-	mutex_unlock(&mapping->host->i_mutex);
-	err = filemap_fdatawait(mapping);
-	if (!ret)
-		ret = err;
-out:
-	return ret;
-}
-
-static long __do_fsync(unsigned int fd, int datasync)
-{
-	struct file *file;
-	int ret = -EBADF;
-
-	file = fget(fd);
-	if (file) {
-		ret = do_fsync(file, datasync);
-		fput(file);
-	}
-	return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-	return __do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-	return __do_fsync(fd, 1);
-}
-
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,
@@ -1550,35 +1413,6 @@ static void discard_buffer(struct buffer_head * bh)
 	unlock_buffer(bh);
 }
 
-/**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private).  If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
-	struct address_space * const mapping = page->mapping;
-
-	BUG_ON(!PageLocked(page));
-	if (PageWriteback(page))
-		return 0;
-
-	if (mapping && mapping->a_ops->releasepage)
-		return mapping->a_ops->releasepage(page, gfp_mask);
-	return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
 /**
  * block_invalidatepage - invalidate part of all of a buffer-backed page
  *
@@ -1630,14 +1464,6 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-void do_invalidatepage(struct page *page, unsigned long offset)
-{
-	void (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage ? :
-		block_invalidatepage;
-	(*invalidatepage)(page, offset);
-}
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
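
Aside (illustrative only, not part of the patch): the removed do_fsync() is the
piece of this diff whose ordering is easiest to misread.  The sketch below
restates that write-then-wait sequence with comments; fsync_ordering_sketch is
an invented name, and the -EINVAL check for a missing ->fsync is omitted, but
the calls, locking, and error propagation mirror the removed code above.

static int fsync_ordering_sketch(struct file *file, int datasync)
{
	struct address_space *mapping = file->f_mapping;
	int ret, err;

	/* Step 1: start writeback of all dirty pages; this does not wait. */
	ret = filemap_fdatawrite(mapping);

	/*
	 * Step 2: call the filesystem's ->fsync() under i_mutex.  Holding
	 * the mutex keeps concurrent writers from redirtying buffers under
	 * us, which could otherwise livelock fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file->f_dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);

	/* Step 3: wait for the writeback started in step 1 to complete. */
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;

	/* The first error encountered wins; later errors never clobber it. */
	return ret;
}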