@@ -474,10 +474,15 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-		if (inode->i_state & I_DIRTY) {
+		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
 			/*
-			 * Someone redirtied the inode while were writing back
-			 * the pages.
+			 * More pages get dirtied by a fast dirtier.
+			 */
+			goto select_queue;
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * At least XFS will redirty the inode during the
+			 * writeback (delalloc) and on io completion (isize).
 			 */
 			redirty_tail(inode);
 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
@@ -502,6 +507,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 			 * soon as the queue becomes uncongested.
 			 */
 			inode->i_state |= I_DIRTY_PAGES;
+select_queue:
 			if (wbc->nr_to_write <= 0) {
 				/*
 				 * slice used up: queue for next turn
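
---

For reference, a consolidated sketch (not part of the patch) of how the
touched region of writeback_single_inode() reads once both hunks are
applied. The unchanged code between the two hunks and the branch bodies
are elided as /* ... */ comments; mapping, redirty_tail(),
PAGECACHE_TAG_DIRTY and the placement of select_queue: inside the
mapping_tagged() branch come from the surrounding fs/fs-writeback.c
context that the hunks only quote:

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/* ... unchanged code between the two hunks ... */
			inode->i_state |= I_DIRTY_PAGES;
select_queue:
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				/* ... requeue for the next pass ... */
			}
			/* ... */
		}
	}

As far as the hunks show, the net effect is that an inode whose pages
keep being dirtied by a fast dirtier during a kupdate pass now skips
redirty_tail() and falls through to the existing queue selection at
select_queue:, instead of being unconditionally redirtied to the tail.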