|
@@ -964,29 +964,48 @@ xfs_release(
|
|
|
xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
|
|
|
}
|
|
|
|
|
|
- if (ip->i_d.di_nlink != 0) {
|
|
|
- if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
|
|
|
- ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
|
|
|
- ip->i_delayed_blks > 0)) &&
|
|
|
- (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
|
|
|
- (!(ip->i_d.di_flags &
|
|
|
- (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
|
|
|
+ if (ip->i_d.di_nlink == 0)
|
|
|
+ return 0;
|
|
|
|
|
|
- /*
|
|
|
- * If we can't get the iolock just skip truncating
|
|
|
- * the blocks past EOF because we could deadlock
|
|
|
- * with the mmap_sem otherwise. We'll get another
|
|
|
- * chance to drop them once the last reference to
|
|
|
- * the inode is dropped, so we'll never leak blocks
|
|
|
- * permanently.
|
|
|
- */
|
|
|
- error = xfs_free_eofblocks(mp, ip,
|
|
|
- XFS_FREE_EOF_TRYLOCK);
|
|
|
- if (error)
|
|
|
- return error;
|
|
|
- }
|
|
|
- }
|
|
|
+ if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
|
|
|
+ ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
|
|
|
+ ip->i_delayed_blks > 0)) &&
|
|
|
+ (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
|
|
|
+ (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
|
|
|
|
|
|
+ /*
|
|
|
+ * If we can't get the iolock just skip truncating the blocks
|
|
|
+ * past EOF because we could deadlock with the mmap_sem
|
|
|
+ * otherwise. We'll get another chance to drop them once the
|
|
|
+ * last reference to the inode is dropped, so we'll never leak
|
|
|
+ * blocks permanently.
|
|
|
+ *
|
|
|
+ * Further, check if the inode is being opened, written and
|
|
|
+ * closed frequently and we have delayed allocation blocks
|
|
|
+ * outstanding (e.g. streaming writes from the NFS server),
|
|
|
+ * truncating the blocks past EOF will cause fragmentation to
|
|
|
+ * occur.
|
|
|
+ *
|
|
|
+ * In this case don't do the truncation, either, but we have to
|
|
|
+ * be careful how we detect this case. Blocks beyond EOF show
|
|
|
+ * up as i_delayed_blks even when the inode is clean, so we
|
|
|
+ * need to truncate them away first before checking for a dirty
|
|
|
+ * release. Hence on the first dirty close we will still remove
|
|
|
+ * the speculative allocation, but after that we will leave it
|
|
|
+ * in place.
|
|
|
+ */
|
|
|
+ if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ error = xfs_free_eofblocks(mp, ip,
|
|
|
+ XFS_FREE_EOF_TRYLOCK);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+
|
|
|
+ /* delalloc blocks after truncation means it really is dirty */
|
|
|
+ if (ip->i_delayed_blks)
|
|
|
+ xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
|
|
|
+ }
|
|
|
return 0;
|
|
|
}
|
|
|
|