|
@@ -267,6 +267,9 @@ error_out:
|
|
|
* If the caller is doing a write at the end of the file, then extend the
|
|
|
* allocation out to the file system's write iosize. We clean up any extra
|
|
|
* space left over when the file is closed in xfs_inactive().
|
|
|
+ *
|
|
|
+ * If we find we already have delalloc preallocation beyond EOF, don't do more
|
|
|
+ * preallocation as it is not needed.
|
|
|
*/
|
|
|
STATIC int
|
|
|
xfs_iomap_eof_want_preallocate(
|
|
@@ -282,6 +285,7 @@ xfs_iomap_eof_want_preallocate(
|
|
|
xfs_filblks_t count_fsb;
|
|
|
xfs_fsblock_t firstblock;
|
|
|
int n, error, imaps;
|
|
|
+ int found_delalloc = 0;
|
|
|
|
|
|
*prealloc = 0;
|
|
|
if ((offset + count) <= ip->i_size)
|
|
@@ -306,12 +310,60 @@ xfs_iomap_eof_want_preallocate(
|
|
|
return 0;
|
|
|
start_fsb += imap[n].br_blockcount;
|
|
|
count_fsb -= imap[n].br_blockcount;
|
|
|
+
|
|
|
+ if (imap[n].br_startblock == DELAYSTARTBLOCK)
|
|
|
+ found_delalloc = 1;
|
|
|
}
|
|
|
}
|
|
|
- *prealloc = 1;
|
|
|
+ if (!found_delalloc)
|
|
|
+ *prealloc = 1;
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * If we don't have a user specified preallocation size, dynamically increase
|
|
|
+ * the preallocation size as the size of the file grows. Cap the maximum size
|
|
|
+ * at a single extent or less if the filesystem is near full. The closer the
|
|
|
+ * filesystem is to full, the smaller the maximum preallocation.
|
|
|
+ */
|
|
|
+STATIC xfs_fsblock_t
|
|
|
+xfs_iomap_prealloc_size(
|
|
|
+ struct xfs_mount *mp,
|
|
|
+ struct xfs_inode *ip)
|
|
|
+{
|
|
|
+ xfs_fsblock_t alloc_blocks = 0;
|
|
|
+
|
|
|
+ if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
|
|
|
+ int shift = 0;
|
|
|
+ int64_t freesp;
|
|
|
+
|
|
|
+ alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
|
|
|
+ alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
|
|
|
+ rounddown_pow_of_two(alloc_blocks));
|
|
|
+
|
|
|
+ xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
|
|
|
+ freesp = mp->m_sb.sb_fdblocks;
|
|
|
+ if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
|
|
|
+ shift = 2;
|
|
|
+ if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
|
|
|
+ shift++;
|
|
|
+ if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
|
|
|
+ shift++;
|
|
|
+ if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
|
|
|
+ shift++;
|
|
|
+ if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
|
|
|
+ shift++;
|
|
|
+ }
|
|
|
+ if (shift)
|
|
|
+ alloc_blocks >>= shift;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (alloc_blocks < mp->m_writeio_blocks)
|
|
|
+ alloc_blocks = mp->m_writeio_blocks;
|
|
|
+
|
|
|
+ return alloc_blocks;
|
|
|
+}
|
|
|
+
|
|
|
int
|
|
|
xfs_iomap_write_delay(
|
|
|
xfs_inode_t *ip,
|
|
@@ -344,6 +396,7 @@ xfs_iomap_write_delay(
|
|
|
extsz = xfs_get_extsz_hint(ip);
|
|
|
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
|
|
+
|
|
|
error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
|
|
|
imap, XFS_WRITE_IMAPS, &prealloc);
|
|
|
if (error)
|
|
@@ -351,9 +404,11 @@ xfs_iomap_write_delay(
|
|
|
|
|
|
retry:
|
|
|
if (prealloc) {
|
|
|
+ xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
|
|
|
+
|
|
|
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
|
|
|
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
|
|
|
- last_fsb = ioalign + mp->m_writeio_blocks;
|
|
|
+ last_fsb = ioalign + alloc_blocks;
|
|
|
} else {
|
|
|
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
|
|
|
}
|
|
@@ -371,22 +426,31 @@ retry:
|
|
|
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
|
|
|
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
|
|
|
&nimaps, NULL);
|
|
|
- if (error && (error != ENOSPC))
|
|
|
+ switch (error) {
|
|
|
+ case 0:
|
|
|
+ case ENOSPC:
|
|
|
+ case EDQUOT:
|
|
|
+ break;
|
|
|
+ default:
|
|
|
return XFS_ERROR(error);
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
- * If bmapi returned us nothing, and if we didn't get back EDQUOT,
|
|
|
- * then we must have run out of space - flush all other inodes with
|
|
|
- * delalloc blocks and retry without EOF preallocation.
|
|
|
+ * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For
|
|
|
+ * ENOSPC, flush all other inodes with delalloc blocks to free up
|
|
|
+ * some of the excess reserved metadata space. For both cases, retry
|
|
|
+ * without EOF preallocation.
|
|
|
*/
|
|
|
if (nimaps == 0) {
|
|
|
trace_xfs_delalloc_enospc(ip, offset, count);
|
|
|
if (flushed)
|
|
|
- return XFS_ERROR(ENOSPC);
|
|
|
+ return XFS_ERROR(error ? error : ENOSPC);
|
|
|
|
|
|
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
- xfs_flush_inodes(ip);
|
|
|
- xfs_ilock(ip, XFS_ILOCK_EXCL);
|
|
|
+ if (error == ENOSPC) {
|
|
|
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
+ xfs_flush_inodes(ip);
|
|
|
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
|
|
|
+ }
|
|
|
|
|
|
flushed = 1;
|
|
|
error = 0;
|