@@ -979,146 +979,6 @@ xfs_fs_clear_inode(
 	ASSERT(XFS_I(inode) == NULL);
 }
 
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
-	struct xfs_mount *mp,
-	void		*data,
-	void		(*syncer)(struct xfs_mount *, void *))
-{
-	struct bhv_vfs_sync_work *work;
-
-	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
-	INIT_LIST_HEAD(&work->w_list);
-	work->w_syncer = syncer;
-	work->w_data = data;
-	work->w_mount = mp;
-	spin_lock(&mp->m_sync_lock);
-	list_add_tail(&work->w_list, &mp->m_sync_list);
-	spin_unlock(&mp->m_sync_lock);
-	wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations. At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inode_work(
-	struct xfs_mount *mp,
-	void		*arg)
-{
-	struct inode	*inode = arg;
-	filemap_flush(inode->i_mapping);
-	iput(inode);
-}
-
-void
-xfs_flush_inode(
-	xfs_inode_t	*ip)
-{
-	struct inode	*inode = VFS_I(ip);
-
-	igrab(inode);
-	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
-	delay(msecs_to_jiffies(500));
-}
-
-/*
- * This is the "bigger hammer" version of xfs_flush_inode_work...
- * (IOW, "If at first you don't succeed, use a Bigger Hammer").
- */
-STATIC void
-xfs_flush_device_work(
-	struct xfs_mount *mp,
-	void		*arg)
-{
-	struct inode	*inode = arg;
-	sync_blockdev(mp->m_super->s_bdev);
-	iput(inode);
-}
-
-void
-xfs_flush_device(
-	xfs_inode_t	*ip)
-{
-	struct inode	*inode = VFS_I(ip);
-
-	igrab(inode);
-	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
-	delay(msecs_to_jiffies(500));
-	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
-}
-
-STATIC void
-xfs_sync_worker(
-	struct xfs_mount *mp,
-	void		*unused)
-{
-	int		error;
-
-	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
-		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
-	mp->m_sync_seq++;
-	wake_up(&mp->m_wait_single_sync_task);
-}
-
-STATIC int
-xfssyncd(
-	void			*arg)
-{
-	struct xfs_mount	*mp = arg;
-	long			timeleft;
-	bhv_vfs_sync_work_t	*work, *n;
-	LIST_HEAD		(tmp);
-
-	set_freezable();
-	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
-	for (;;) {
-		timeleft = schedule_timeout_interruptible(timeleft);
-		/* swsusp */
-		try_to_freeze();
-		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
-			break;
-
-		spin_lock(&mp->m_sync_lock);
-		/*
-		 * We can get woken by laptop mode, to do a sync -
-		 * that's the (only!) case where the list would be
-		 * empty with time remaining.
-		 */
-		if (!timeleft || list_empty(&mp->m_sync_list)) {
-			if (!timeleft)
-				timeleft = xfs_syncd_centisecs *
-						msecs_to_jiffies(10);
-			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
-			list_add_tail(&mp->m_sync_work.w_list,
-					&mp->m_sync_list);
-		}
-		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
-			list_move(&work->w_list, &tmp);
-		spin_unlock(&mp->m_sync_lock);
-
-		list_for_each_entry_safe(work, n, &tmp, w_list) {
-			(*work->w_syncer)(mp, work->w_data);
-			list_del(&work->w_list);
-			if (work == &mp->m_sync_work)
-				continue;
-			kmem_free(work);
-		}
-	}
-
-	return 0;
-}
-
 STATIC void
 xfs_free_fsname(
 	struct xfs_mount	*mp)
@@ -1137,8 +997,7 @@ xfs_fs_put_super(
 	int			unmount_event_flags = 0;
 	int			error;
 
-	kthread_stop(mp->m_sync_task);
-
+	xfs_syncd_stop(mp);
 	xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
 
 #ifdef HAVE_DMAPI
@@ -1808,13 +1667,9 @@ xfs_fs_fill_super(
 		goto fail_vnrele;
 	}
 
-	mp->m_sync_work.w_syncer = xfs_sync_worker;
-	mp->m_sync_work.w_mount = mp;
-	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
-	if (IS_ERR(mp->m_sync_task)) {
-		error = -PTR_ERR(mp->m_sync_task);
+	error = xfs_syncd_init(mp);
+	if (error)
 		goto fail_vnrele;
-	}
 
 	xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
 
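
The bodies of xfs_syncd_init() and xfs_syncd_stop() are not part of these hunks; they live in the new xfs_sync.c alongside the code removed above. A minimal sketch of what they presumably contain, assuming the removed kthread setup and teardown logic moves over unchanged (the exact definitions are not shown in this diff):

/*
 * Sketch only: assumed counterparts in xfs_sync.c, reconstructed from
 * the code removed from xfs_super.c above. Relies on the same xfssyncd
 * and xfs_sync_worker definitions that were moved out of this file.
 */
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	/* Set up the per-mount sync work item and start the xfssyncd thread. */
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	/* The kthread_stop() call removed from xfs_fs_put_super() above. */
	kthread_stop(mp->m_sync_task);
}

With that split, xfs_fs_fill_super() only has to check an errno-style return, which is what the two added lines in the last hunk rely on.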