@@ -1304,6 +1304,55 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
+/*
+ * Reserve metadata for a single block located at lblock
+ */
+static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+{
+	int retries = 0;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned int md_needed;
+	ext4_lblk_t save_last_lblock;
+	int save_len;
+
+	/*
+	 * recalculate the amount of metadata blocks to reserve
+	 * in order to allocate nrblocks
+	 * worst case is one extent per block
+	 */
+repeat:
+	spin_lock(&ei->i_block_reservation_lock);
+	/*
+	 * ext4_calc_metadata_amount() has side effects, which we have
+	 * to be prepared to undo if we fail to claim space.
+	 */
+	save_len = ei->i_da_metadata_calc_len;
+	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+	md_needed = EXT4_NUM_B2C(sbi,
+				 ext4_calc_metadata_amount(inode, lblock));
+	trace_ext4_da_reserve_space(inode, md_needed);
+
+	/*
+	 * We do still charge estimated metadata to the sb though;
+	 * we cannot afford to run out of free blocks.
+	 */
+	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
+		ei->i_da_metadata_calc_len = save_len;
+		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+		spin_unlock(&ei->i_block_reservation_lock);
+		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+			cond_resched();
+			goto repeat;
+		}
+		return -ENOSPC;
+	}
+	ei->i_reserved_meta_blocks += md_needed;
+	spin_unlock(&ei->i_block_reservation_lock);
+
+	return 0;	/* success */
+}
+
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1940,8 +1989,11 @@ add_delayed:
 		 * XXX: __block_prepare_write() unmaps passed block,
 		 * is it OK?
 		 */
-		/* If the block was allocated from previously allocated cluster,
-		 * then we dont need to reserve it again. */
+		/*
+		 * If the block was allocated from a previously allocated cluster,
+		 * then we don't need to reserve it again. However we still need
+		 * to reserve metadata for every block we're going to write.
+		 */
 		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
 			ret = ext4_da_reserve_space(inode, iblock);
 			if (ret) {
@@ -1949,6 +2001,13 @@ add_delayed:
 				retval = ret;
 				goto out_unlock;
 			}
+		} else {
+			ret = ext4_da_reserve_metadata(inode, iblock);
+			if (ret) {
+				/* not enough space to reserve */
+				retval = ret;
+				goto out_unlock;
+			}
 		}
 
 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
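
For readers outside fs/ext4, the control flow the new ext4_da_reserve_metadata() helper follows boils down to: snapshot the estimator state, try to claim space for the worst-case metadata, and on failure restore the snapshot and retry before giving up with -ENOSPC. Below is a minimal user-space sketch of that pattern; the names (md_state, estimate_metadata, claim_free, MAX_RETRIES) are made up for illustration, and the real helper additionally holds i_block_reservation_lock, emits a tracepoint, and defers the retry decision to ext4_should_retry_alloc().

/*
 * Illustrative sketch only: none of these names exist in ext4.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 3

struct md_state {
	unsigned int calc_len;		/* estimator state that a failed claim must restore */
	unsigned int calc_last_lblock;
	unsigned int reserved_meta;	/* metadata blocks reserved so far */
	unsigned int free_blocks;	/* what the "filesystem" has left */
};

/* Worst-case estimate: one extent (one metadata block) per data block. */
static unsigned int estimate_metadata(struct md_state *s, unsigned int lblock)
{
	s->calc_last_lblock = lblock;	/* side effect, like ext4_calc_metadata_amount() */
	s->calc_len = 1;
	return 1;
}

static int claim_free(struct md_state *s, unsigned int nblocks)
{
	if (s->free_blocks < nblocks)
		return -ENOSPC;
	s->free_blocks -= nblocks;
	return 0;
}

static int reserve_metadata(struct md_state *s, unsigned int lblock)
{
	unsigned int save_len, save_last, md_needed;
	int retries = 0;

repeat:
	/* Snapshot the estimator state so a failed claim can be undone. */
	save_len = s->calc_len;
	save_last = s->calc_last_lblock;
	md_needed = estimate_metadata(s, lblock);

	if (claim_free(s, md_needed)) {
		s->calc_len = save_len;
		s->calc_last_lblock = save_last;
		if (retries++ < MAX_RETRIES)
			goto repeat;	/* the kernel asks ext4_should_retry_alloc() instead */
		return -ENOSPC;
	}
	s->reserved_meta += md_needed;
	return 0;
}

int main(void)
{
	struct md_state s = { .free_blocks = 2 };

	/* First two blocks reserve successfully, the rest fail with -ENOSPC. */
	for (unsigned int blk = 0; blk < 4; blk++)
		printf("block %u -> %d\n", blk, reserve_metadata(&s, blk));
	return 0;
}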