@@ -580,17 +580,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 	return 1;
 }
 
-void writeback_inodes_wb(struct bdi_writeback *wb,
-			 struct writeback_control *wbc)
+static void __writeback_inodes_wb(struct bdi_writeback *wb,
+				  struct writeback_control *wbc)
 {
 	int ret = 0;
 
 	if (!wbc->wb_start)
 		wbc->wb_start = jiffies; /* livelock avoidance */
-	spin_lock(&wb->list_lock);
-
-	if (list_empty(&wb->b_io))
-		queue_io(wb, wbc->older_than_this);
 
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
@@ -606,19 +602,16 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 		if (ret)
 			break;
 	}
-	spin_unlock(&wb->list_lock);
 	/* Leave any unwritten inodes on b_io */
 }
 
-static void __writeback_inodes_sb(struct super_block *sb,
-		struct bdi_writeback *wb, struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+		struct writeback_control *wbc)
 {
-	WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
 		queue_io(wb, wbc->older_than_this);
-	writeback_sb_inodes(sb, wb, wbc, true);
+	__writeback_inodes_wb(wb, wbc);
 	spin_unlock(&wb->list_lock);
 }
 
@@ -685,7 +678,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	 * The intended call sequence for WB_SYNC_ALL writeback is:
 	 *
 	 * wb_writeback()
-	 *     __writeback_inodes_sb()     <== called only once
+	 *     writeback_sb_inodes()       <== called only once
 	 *         write_cache_pages()     <== called once for each inode
 	 *             (quickly) tag currently dirty pages
 	 *             (maybe slowly) sync all tagged pages
@@ -694,6 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		write_chunk = LONG_MAX;
 
 	wbc.wb_start = jiffies; /* livelock avoidance */
+	spin_lock(&wb->list_lock);
 	for (;;) {
 		/*
 		 * Stop writeback when nr_pages has been consumed
@@ -730,10 +724,12 @@ static long wb_writeback(struct bdi_writeback *wb,
 		wbc.inodes_written = 0;
 
 		trace_wbc_writeback_start(&wbc, wb->bdi);
+		if (list_empty(&wb->b_io))
+			queue_io(wb, wbc.older_than_this);
 		if (work->sb)
-			__writeback_inodes_sb(work->sb, wb, &wbc);
+			writeback_sb_inodes(work->sb, wb, &wbc, true);
 		else
-			writeback_inodes_wb(wb, &wbc);
+			__writeback_inodes_wb(wb, &wbc);
 		trace_wbc_writeback_written(&wbc, wb->bdi);
 
 		work->nr_pages -= write_chunk - wbc.nr_to_write;
@@ -761,7 +757,6 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * become available for writeback. Otherwise
 		 * we'll just busyloop.
 		 */
-		spin_lock(&wb->list_lock);
 		if (!list_empty(&wb->b_more_io)) {
 			inode = wb_inode(wb->b_more_io.prev);
 			trace_wbc_writeback_wait(&wbc, wb->bdi);
@@ -769,8 +764,8 @@ static long wb_writeback(struct bdi_writeback *wb,
 			inode_wait_for_writeback(inode, wb);
 			spin_unlock(&inode->i_lock);
 		}
-		spin_unlock(&wb->list_lock);
 	}
+	spin_unlock(&wb->list_lock);
 
 	return wrote;
 }
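
The net effect of the hunks above: wb->list_lock is now taken by the callers (the public writeback_inodes_wb() and wb_writeback() itself), queue_io() refills b_io only when it has drained, and the renamed __writeback_inodes_wb() helper runs with the lock already held. The user-space sketch below illustrates that caller-locks / helper-assumes-locked split. It is not kernel code; the fixed-size queue, QUEUE_DEPTH, next_ino and the printf() are purely illustrative stand-ins for the b_io inode list, and only the function names loosely mirror the ones in the patch.

/*
 * Minimal user-space sketch (assumptions noted above) of the locking
 * pattern this patch introduces: the caller acquires the list lock and
 * refills the io queue when it is empty; the double-underscore helper
 * assumes the lock is already held and only drains entries.
 */
#include <pthread.h>
#include <stdio.h>

#define QUEUE_DEPTH 4

struct bdi_writeback {
	pthread_mutex_t list_lock;	/* plays the role of wb->list_lock */
	int b_io[QUEUE_DEPTH];		/* stand-in for the b_io inode list */
	int b_io_count;
	int next_ino;			/* fake source of dirty inodes */
};

/* queue_io(): refill b_io; the caller must already hold list_lock */
static void queue_io(struct bdi_writeback *wb)
{
	while (wb->b_io_count < QUEUE_DEPTH)
		wb->b_io[wb->b_io_count++] = wb->next_ino++;
}

/* __writeback_inodes_wb(): drain b_io; the caller must already hold list_lock */
static void __writeback_inodes_wb(struct bdi_writeback *wb)
{
	while (wb->b_io_count > 0)
		printf("writing back inode %d\n", wb->b_io[--wb->b_io_count]);
}

/* writeback_inodes_wb(): public entry point, takes the lock and refills b_io */
static void writeback_inodes_wb(struct bdi_writeback *wb)
{
	pthread_mutex_lock(&wb->list_lock);
	if (wb->b_io_count == 0)
		queue_io(wb);		/* refill only when b_io has drained */
	__writeback_inodes_wb(wb);
	pthread_mutex_unlock(&wb->list_lock);
}

int main(void)
{
	struct bdi_writeback wb = { .list_lock = PTHREAD_MUTEX_INITIALIZER };

	writeback_inodes_wb(&wb);	/* first pass fills and drains b_io */
	writeback_inodes_wb(&wb);	/* second pass exercises the refill path */
	return 0;
}

In the patched wb_writeback() the same split is what lets the lock be held across the whole retry loop while b_io is still only refilled once it is empty.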