@@ -363,18 +363,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & I_FREEING)) {
-		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
-			/*
-			 * More pages get dirtied by a fast dirtier.
-			 */
-			goto select_queue;
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * At least XFS will redirty the inode during the
-			 * writeback (delalloc) and on io completion (isize).
-			 */
-			redirty_tail(inode);
-		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 			/*
 			 * We didn't write back all the pages. nfs_writepages()
 			 * sometimes bales out without doing anything. Redirty
@@ -396,7 +385,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 				 * soon as the queue becomes uncongested.
 				 */
 				inode->i_state |= I_DIRTY_PAGES;
-select_queue:
 				if (wbc->nr_to_write <= 0) {
 					/*
 					 * slice used up: queue for next turn
@@ -419,6 +407,14 @@ select_queue:
 				inode->i_state |= I_DIRTY_PAGES;
 				redirty_tail(inode);
 			}
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * Filesystems can dirty the inode during writeback
+			 * operations, such as delayed allocation during
+			 * submission or metadata updates after data IO
+			 * completion.
+			 */
+			redirty_tail(inode);
 		} else if (atomic_read(&inode->i_count)) {
 			/*
 			 * The inode is clean, inuse
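
For readers following the control flow rather than the hunks: after this change, writeback_single_inode() first checks whether the mapping still has pages tagged dirty (requeueing or redirtying according to wbc->for_kupdate and the remaining nr_to_write), then whether the inode itself was redirtied (I_DIRTY), and only then files the clean inode on the in-use or unused list. The snippet below is a minimal userspace sketch of that branch order, not kernel code; the struct, the print-only redirty_tail()/requeue_io()/list_move_to() helpers, and the example values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative, userspace-only model of the requeue decision made at the
 * end of writeback_single_inode() after this patch.  All names are
 * stand-ins: the helpers just print the action the kernel would take.
 */
struct inode_model {
	bool pages_tagged_dirty;	/* mapping tagged PAGECACHE_TAG_DIRTY */
	bool inode_dirty;		/* i_state & I_DIRTY                  */
	bool for_kupdate;		/* wbc->for_kupdate                   */
	long nr_to_write;		/* remaining writeback budget         */
	int  i_count;			/* inode reference count              */
};

static void redirty_tail(const char *why) { printf("redirty_tail(): %s\n", why); }
static void requeue_io(const char *why)   { printf("requeue_io(): %s\n", why); }
static void list_move_to(const char *l)   { printf("list_move to %s\n", l); }

/* Branch order after the patch: dirty pages, then I_DIRTY, then clean cases. */
static void requeue_decision(const struct inode_model *i)
{
	if (i->pages_tagged_dirty) {
		if (i->for_kupdate) {
			if (i->nr_to_write <= 0)
				requeue_io("slice used up: queue for next turn");
			else
				redirty_tail("somehow blocked: retry later");
		} else {
			redirty_tail("give other inodes a turn at writeout");
		}
	} else if (i->inode_dirty) {
		redirty_tail("inode redirtied during writeback/IO completion");
	} else if (i->i_count) {
		list_move_to("inode_in_use");
	} else {
		list_move_to("inode_unused");
	}
}

int main(void)
{
	/* An inode that a filesystem redirtied while its data was written. */
	struct inode_model redirtied = { .inode_dirty = true, .i_count = 1 };

	requeue_decision(&redirtied);	/* prints the redirty_tail() case */
	return 0;
}

Note that, per the hunks above, the I_DIRTY test now runs only when no dirty pages remain, so an inode with both outstanding pages and redirtied metadata is handled by the dirty-pages branch rather than being parked by redirty_tail().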