@@ -1076,7 +1076,6 @@ void ext4_da_update_reserve_space(struct inode *inode,
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_inode_info *ei = EXT4_I(inode);
-	int mdb_free = 0, allocated_meta_blocks = 0;
 
 	spin_lock(&ei->i_block_reservation_lock);
 	trace_ext4_da_update_reserve_space(inode, used);
@@ -1091,11 +1090,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	used += ei->i_allocated_meta_blocks;
 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	allocated_meta_blocks = ei->i_allocated_meta_blocks;
+	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+			   used + ei->i_allocated_meta_blocks);
 	ei->i_allocated_meta_blocks = 0;
-	percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
 
 	if (ei->i_reserved_data_blocks == 0) {
 		/*
@@ -1103,31 +1101,23 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		 * only when we have written all of the delayed
 		 * allocation blocks.
 		 */
-		mdb_free = ei->i_reserved_meta_blocks;
+		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+				   ei->i_reserved_meta_blocks);
 		ei->i_reserved_meta_blocks = 0;
 		ei->i_da_metadata_calc_len = 0;
-		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
 	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
-	/* Update quota subsystem */
-	if (quota_claim) {
+	/* Update quota subsystem for data blocks */
+	if (quota_claim)
 		dquot_claim_block(inode, used);
-		if (mdb_free)
-			dquot_release_reservation_block(inode, mdb_free);
-	} else {
+	else {
 		/*
 		 * We did fallocate with an offset that is already delayed
 		 * allocated. So on delayed allocated writeback we should
-		 * not update the quota for allocated blocks. But then
-		 * converting an fallocate region to initialized region would
-		 * have caused a metadata allocation. So claim quota for
-		 * that
+		 * not re-claim the quota for fallocated blocks.
 		 */
-		if (allocated_meta_blocks)
-			dquot_claim_block(inode, allocated_meta_blocks);
-		dquot_release_reservation_block(inode, mdb_free + used -
-						allocated_meta_blocks);
+		dquot_release_reservation_block(inode, used);
 	}
 
 	/*
@@ -1861,7 +1851,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
 	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned long md_needed, md_reserved;
+	unsigned long md_needed;
 	int ret;
 
 	/*
@@ -1871,22 +1861,24 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
 	 */
 repeat:
 	spin_lock(&ei->i_block_reservation_lock);
-	md_reserved = ei->i_reserved_meta_blocks;
 	md_needed = ext4_calc_metadata_amount(inode, lblock);
 	trace_ext4_da_reserve_space(inode, md_needed);
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	/*
-	 * Make quota reservation here to prevent quota overflow
-	 * later. Real quota accounting is done at pages writeout
-	 * time.
+	 * We will charge metadata quota at writeout time; this saves
+	 * us from metadata over-estimation, though we may go over by
+	 * a small amount in the end. Here we just reserve for data.
 	 */
-	ret = dquot_reserve_block(inode, md_needed + 1);
+	ret = dquot_reserve_block(inode, 1);
 	if (ret)
 		return ret;
-
+	/*
+	 * We do still charge estimated metadata to the sb though;
+	 * we cannot afford to run out of free blocks.
+	 */
 	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
-		dquot_release_reservation_block(inode, md_needed + 1);
+		dquot_release_reservation_block(inode, 1);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
 			goto repeat;
@@ -1933,12 +1925,13 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 		 * only when we have written all of the delayed
 		 * allocation blocks.
 		 */
-		to_free += ei->i_reserved_meta_blocks;
+		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+				   ei->i_reserved_meta_blocks);
 		ei->i_reserved_meta_blocks = 0;
 		ei->i_da_metadata_calc_len = 0;
 	}
 
-	/* update fs dirty blocks counter */
+	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
 
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -3086,7 +3079,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 			       loff_t pos, unsigned len, unsigned flags,
 			       struct page **pagep, void **fsdata)
 {
-	int ret, retries = 0, quota_retries = 0;
+	int ret, retries = 0;
 	struct page *page;
 	pgoff_t index;
 	unsigned from, to;
@@ -3145,22 +3138,6 @@ retry:
 
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-
-	if ((ret == -EDQUOT) &&
-	    EXT4_I(inode)->i_reserved_meta_blocks &&
-	    (quota_retries++ < 3)) {
-		/*
-		 * Since we often over-estimate the number of meta
-		 * data blocks required, we may sometimes get a
-		 * spurios out of quota error even though there would
-		 * be enough space once we write the data blocks and
-		 * find out how many meta data blocks were _really_
-		 * required. So try forcing the inode write to see if
-		 * that helps.
-		 */
-		write_inode_now(inode, (quota_retries == 3));
-		goto retry;
-	}
 out:
 	return ret;
 }
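
The sketch below is not part of the patch and is not kernel code; it is a minimal user-space illustration of the accounting split the hunks above introduce: after the change, quota reserves only the data block being written (dquot_reserve_block(inode, 1)), while the superblock dirty-block counter still covers data plus the estimated metadata (ext4_claim_free_blocks(sbi, md_needed + 1)), and metadata quota is charged only when blocks are really allocated. All names in the sketch (struct fs_counters, da_reserve, da_claim, and their parameters) are hypothetical and exist only for illustration.

/*
 * Minimal user-space sketch of the post-patch accounting model.
 * Not kernel code; the counters stand in for the dquot reservation,
 * dquot_claim_block(), and s_dirtyblocks_counter respectively.
 */
#include <stdio.h>

struct fs_counters {
	long quota_reserved;	/* reserved against the user's quota     */
	long quota_claimed;	/* actually charged to the user's quota  */
	long dirty_blocks;	/* fs-wide delayed-allocation reservation */
};

/* Reserve space for one delayed-allocation data block. */
static int da_reserve(struct fs_counters *c, long md_estimate, long free_blocks)
{
	/* After the patch: quota is reserved for the data block only. */
	c->quota_reserved += 1;

	/* The fs counter still covers data + estimated metadata. */
	if (c->dirty_blocks + md_estimate + 1 > free_blocks) {
		c->quota_reserved -= 1;	/* undo, as the patch does on failure */
		return -1;		/* would be -ENOSPC in the kernel */
	}
	c->dirty_blocks += md_estimate + 1;
	return 0;
}

/*
 * Writeback time: convert the reservation into real usage.  Metadata quota
 * is charged only now, for the blocks actually allocated; any leftover
 * metadata estimate stays in dirty_blocks until all reserved data blocks
 * have been written (that final release is not modeled here).
 */
static void da_claim(struct fs_counters *c, long used, long meta_allocated)
{
	c->quota_reserved -= used;
	c->quota_claimed += used + meta_allocated;
	c->dirty_blocks -= used + meta_allocated;
}

int main(void)
{
	struct fs_counters c = {0};

	if (da_reserve(&c, 3 /* estimated metadata */, 100 /* free blocks */))
		return 1;
	printf("after reserve: quota reserved=%ld dirty=%ld\n",
	       c.quota_reserved, c.dirty_blocks);

	da_claim(&c, 1 /* data block written */, 1 /* metadata really used */);
	printf("after claim:   quota claimed=%ld dirty=%ld\n",
	       c.quota_claimed, c.dirty_blocks);
	return 0;
}

The point of the split is visible in the output: the quota path never sees the metadata over-estimate, so a user near their quota limit no longer gets a spurious EDQUOT from speculative metadata, while the filesystem still protects itself against running out of free blocks.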