@@ -84,13 +84,9 @@ static inline struct inode *wb_inode(struct list_head *head)
 	return list_entry(head, struct inode, i_wb_list);
 }
 
-static void bdi_queue_work(struct backing_dev_info *bdi,
-		struct wb_writeback_work *work)
+/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
 {
-	trace_writeback_queue(bdi, work);
-
-	spin_lock_bh(&bdi->wb_lock);
-	list_add_tail(&work->list, &bdi->work_list);
 	if (bdi->wb.task) {
 		wake_up_process(bdi->wb.task);
 	} else {
@@ -98,15 +94,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 		 * The bdi thread isn't there, wake up the forker thread which
 		 * will create and run it.
 		 */
-		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
 	}
+}
+
+static void bdi_queue_work(struct backing_dev_info *bdi,
+			   struct wb_writeback_work *work)
+{
+	trace_writeback_queue(bdi, work);
+
+	spin_lock_bh(&bdi->wb_lock);
+	list_add_tail(&work->list, &bdi->work_list);
+	if (!bdi->wb.task)
+		trace_writeback_nothread(bdi, work);
+	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic, bool for_background)
+		      bool range_cyclic)
 {
 	struct wb_writeback_work *work;
 
@@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
-	work->for_background = for_background;
 
 	bdi_queue_work(bdi, work);
 }
@@ -144,7 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
-	__bdi_start_writeback(bdi, nr_pages, true, false);
+	__bdi_start_writeback(bdi, nr_pages, true);
 }
 
 /**
@@ -152,13 +158,20 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
  * @bdi: the backing device to write from
  *
  * Description:
- *   This does WB_SYNC_NONE background writeback. The IO is only
- *   started when this function returns, we make no guarentees on
- *   completion. Caller need not hold sb s_umount semaphore.
+ *   This makes sure WB_SYNC_NONE background writeback happens. When
+ *   this function returns, it is only guaranteed that for given BDI
+ *   some IO is happening if we are over background dirty threshold.
+ *   Caller need not hold sb s_umount semaphore.
  */
 void bdi_start_background_writeback(struct backing_dev_info *bdi)
 {
-	__bdi_start_writeback(bdi, LONG_MAX, true, true);
+	/*
+	 * We just wake up the flusher thread. It will perform background
+	 * writeback as soon as there is no other work to do.
+	 */
+	spin_lock_bh(&bdi->wb_lock);
+	bdi_wakeup_flusher(bdi);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -718,6 +731,23 @@ static unsigned long get_nr_dirty_pages(void)
 		get_nr_dirty_inodes();
 }
 
+static long wb_check_background_flush(struct bdi_writeback *wb)
+{
+	if (over_bground_thresh()) {
+
+		struct wb_writeback_work work = {
+			.nr_pages	= LONG_MAX,
+			.sync_mode	= WB_SYNC_NONE,
+			.for_background	= 1,
+			.range_cyclic	= 1,
+		};
+
+		return wb_writeback(wb, &work);
+	}
+
+	return 0;
+}
+
 static long wb_check_old_data_flush(struct bdi_writeback *wb)
 {
 	unsigned long expired;
@@ -787,6 +817,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 	 * Check for periodic writeback, kupdated() style
 	 */
 	wrote += wb_check_old_data_flush(wb);
+	wrote += wb_check_background_flush(wb);
 	clear_bit(BDI_writeback_running, &wb->bdi->state);
 
 	return wrote;
@@ -873,7 +904,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false, false);
+		__bdi_start_writeback(bdi, nr_pages, false);
 	}
 	rcu_read_unlock();
 }
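
For illustration, a minimal standalone userspace sketch of the scheme this
patch switches to: a waker takes the lock and wakes the flusher without
allocating or queueing a work item, and the flusher checks the background
threshold itself once it has no explicit work. Every identifier below
(dirty_pages, BG_THRESH, start_background_writeback, flusher_main) is a
hypothetical stand-in for illustration only, not a kernel symbol; the real
code paths are bdi_wakeup_flusher(), over_bground_thresh() and
wb_check_background_flush() in the diff above.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_long dirty_pages;		/* stand-in for the dirty-page counter */
	#define BG_THRESH 100			/* stand-in for the background threshold */

	static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wb_wake = PTHREAD_COND_INITIALIZER;

	static int over_bground_thresh(void)	/* models over_bground_thresh() */
	{
		return atomic_load(&dirty_pages) > BG_THRESH;
	}

	/* Models the new bdi_start_background_writeback(): lock, wake, queue nothing. */
	static void start_background_writeback(void)
	{
		pthread_mutex_lock(&wb_lock);
		pthread_cond_signal(&wb_wake);	/* bdi_wakeup_flusher() analogue */
		pthread_mutex_unlock(&wb_lock);
	}

	static void *flusher_main(void *unused)
	{
		(void)unused;
		for (;;) {
			pthread_mutex_lock(&wb_lock);
			while (!over_bground_thresh())
				pthread_cond_wait(&wb_wake, &wb_lock);
			pthread_mutex_unlock(&wb_lock);
			/*
			 * Models wb_check_background_flush(): write back until
			 * below the threshold; no work item was ever queued.
			 */
			while (over_bground_thresh())
				atomic_fetch_sub(&dirty_pages, 10);
			printf("background flush done, %ld dirty pages left\n",
			       atomic_load(&dirty_pages));
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;

		pthread_create(&tid, NULL, flusher_main, NULL);
		atomic_fetch_add(&dirty_pages, 500);	/* a writer dirties pages... */
		start_background_writeback();		/* ...and merely wakes the flusher */
		sleep(1);
		return 0;
	}

The design point mirrors the patch: because background writeback is detected
by the flusher itself rather than represented by a queued item, callers on the
page-dirtying path only take bdi->wb_lock and never allocate, and explicitly
queued work naturally runs before background flushing.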