@@ -823,10 +823,16 @@ int bdi_writeback_thread(void *data)
 			continue;
 		}
 
-		if (dirty_writeback_interval)
+		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-		else
+		else {
+			/*
+			 * We have nothing to do, so can go sleep without any
+			 * timeout and save power. When a work is queued or
+			 * something is made dirty - we will be woken up.
+			 */
 			schedule();
+		}
 
 		try_to_freeze();
 	}
@@ -862,6 +868,26 @@ void wakeup_flusher_threads(long nr_pages)
 	rcu_read_unlock();
 }
 
+/*
+ * This function is used when the first inode for this bdi is marked dirty. It
+ * wakes-up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes.
+ */
+static void wakeup_bdi_thread(struct backing_dev_info *bdi)
+{
+	spin_lock(&bdi->wb_lock);
+	if (bdi->wb.task)
+		wake_up_process(bdi->wb.task);
+	else
+		/*
+		 * When bdi tasks are inactive for long time, they are killed.
+		 * In this case we have to wake-up the forker thread which
+		 * should create and run the bdi thread.
+		 */
+		wake_up_process(default_backing_dev_info.wb.task);
+	spin_unlock(&bdi->wb_lock);
+}
+
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -914,6 +940,8 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block *sb = inode->i_sb;
+	struct backing_dev_info *bdi = NULL;
+	bool wakeup_bdi = false;
 
 	/*
 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -967,22 +995,31 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * reposition it (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
-			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-			struct backing_dev_info *bdi = wb->bdi;
-
-			if (bdi_cap_writeback_dirty(bdi) &&
-			    !test_bit(BDI_registered, &bdi->state)) {
-				WARN_ON(1);
-				printk(KERN_ERR "bdi-%s not registered\n",
-								bdi->name);
+			bdi = inode_to_bdi(inode);
+
+			if (bdi_cap_writeback_dirty(bdi)) {
+				WARN(!test_bit(BDI_registered, &bdi->state),
+				     "bdi-%s not registered\n", bdi->name);
+
+				/*
+				 * If this is the first dirty inode for this
+				 * bdi, we have to wake-up the corresponding
+				 * bdi thread to make sure background
+				 * write-back happens later.
+				 */
+				if (!wb_has_dirty_io(&bdi->wb))
+					wakeup_bdi = true;
 			}
 
 			inode->dirtied_when = jiffies;
-			list_move(&inode->i_list, &wb->b_dirty);
+			list_move(&inode->i_list, &bdi->wb.b_dirty);
 		}
 	}
 out:
 	spin_unlock(&inode_lock);
+
+	if (wakeup_bdi)
+		wakeup_bdi_thread(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
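For illustration only, not part of the patch above: a minimal user-space sketch of the wakeup discipline the patch adds, written with POSIX threads instead of kernel primitives. The worker sleeps without a timeout while it has nothing to do, and the producer wakes it only on the empty-to-non-empty transition, which is what the wb_has_dirty_io() checks and the wakeup_bdi_thread() call achieve in the kernel. Every name below (worker, queue_item, nr_items) is hypothetical.

/* Hypothetical user-space sketch; mirrors the patch's idea, not its code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int nr_items;	/* stand-in for the bdi's dirty inodes */
static bool stop;

/* Analogue of the bdi thread: sleep with no timeout when there is no work. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		if (nr_items == 0) {
			/* Nothing dirty: sleep until explicitly woken. */
			pthread_cond_wait(&cond, &lock);
			continue;
		}
		printf("worker: flushing %d item(s)\n", nr_items);
		nr_items = 0;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of __mark_inode_dirty(): wake the worker only for the first item. */
static void queue_item(void)
{
	bool was_empty;

	pthread_mutex_lock(&lock);
	was_empty = (nr_items++ == 0);
	pthread_mutex_unlock(&lock);

	if (was_empty)
		pthread_cond_signal(&cond);	/* empty -> non-empty transition */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	queue_item();
	queue_item();	/* no extra wakeup: the queue was already non-empty */
	sleep(1);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);
	pthread_join(t, NULL);
	return 0;
}

The sketch omits the periodic timeout path: in the patch, a bdi thread that still has dirty IO keeps waking every dirty_writeback_interval, and only a completely idle thread switches to the indefinite sleep shown here.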