@@ -122,6 +122,58 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
 	NULL
 };
 
+static __le32 ext4_xattr_block_csum(struct inode *inode,
+				    sector_t block_nr,
+				    struct ext4_xattr_header *hdr)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__u32 csum, old;
+
+	old = hdr->h_checksum;
+	hdr->h_checksum = 0;
+	if (le32_to_cpu(hdr->h_refcount) != 1) {
+		block_nr = cpu_to_le64(block_nr);
+		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+				   sizeof(block_nr));
+	} else
+		csum = ei->i_csum_seed;
+	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+			   EXT4_BLOCK_SIZE(inode->i_sb));
+	hdr->h_checksum = old;
+	return cpu_to_le32(csum);
+}
+
+static int ext4_xattr_block_csum_verify(struct inode *inode,
+					sector_t block_nr,
+					struct ext4_xattr_header *hdr)
+{
+	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+		return 0;
+	return 1;
+}
+
+static void ext4_xattr_block_csum_set(struct inode *inode,
+				      sector_t block_nr,
+				      struct ext4_xattr_header *hdr)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+		return;
+
+	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+}
+
+static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
+						struct inode *inode,
+						struct buffer_head *bh)
+{
+	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
+	return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 static inline const struct xattr_handler *
 ext4_xattr_handler(int name_index)
 {
@@ -156,12 +208,22 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
 }
 
 static inline int
-ext4_xattr_check_block(struct buffer_head *bh)
+ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
+	int error;
+
+	if (buffer_verified(bh))
+		return 0;
+
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EIO;
-	return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+		return -EIO;
+	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	if (!error)
+		set_buffer_verified(bh);
+	return error;
 }
 
 static inline int
@@ -224,7 +286,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
 		goto cleanup;
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext4_xattr_check_block(bh)) {
+	if (ext4_xattr_check_block(inode, bh)) {
 bad_block:
 		EXT4_ERROR_INODE(inode, "bad block %llu",
 				 EXT4_I(inode)->i_file_acl);
@@ -369,7 +431,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 		goto cleanup;
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext4_xattr_check_block(bh)) {
+	if (ext4_xattr_check_block(inode, bh)) {
 		EXT4_ERROR_INODE(inode, "bad block %llu",
 				 EXT4_I(inode)->i_file_acl);
 		error = -EIO;
@@ -492,7 +554,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 	if (ce)
 		mb_cache_entry_release(ce);
 	unlock_buffer(bh);
-	error = ext4_handle_dirty_metadata(handle, inode, bh);
+	error = ext4_handle_dirty_xattr_block(handle, inode, bh);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
 	dquot_free_block(inode, 1);
@@ -662,7 +724,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
 		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
 			atomic_read(&(bs->bh->b_count)),
 			le32_to_cpu(BHDR(bs->bh)->h_refcount));
-		if (ext4_xattr_check_block(bs->bh)) {
+		if (ext4_xattr_check_block(inode, bs->bh)) {
 			EXT4_ERROR_INODE(inode, "bad block %llu",
 					 EXT4_I(inode)->i_file_acl);
 			error = -EIO;
@@ -725,9 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (error == -EIO)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_metadata(handle,
-								   inode,
-								   bs->bh);
+				error = ext4_handle_dirty_xattr_block(handle,
+								      inode,
+								      bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -796,9 +858,9 @@ inserted:
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					le32_to_cpu(BHDR(new_bh)->h_refcount));
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_metadata(handle,
-								   inode,
-								   new_bh);
+				error = ext4_handle_dirty_xattr_block(handle,
+								      inode,
+								      new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -855,8 +917,8 @@ getblk_failed:
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(new_bh);
-			error = ext4_handle_dirty_metadata(handle,
-							   inode, new_bh);
+			error = ext4_handle_dirty_xattr_block(handle,
+							      inode, new_bh);
 			if (error)
 				goto cleanup;
 		}
@@ -1193,7 +1255,7 @@ retry:
 	error = -EIO;
 	if (!bh)
 		goto cleanup;
-	if (ext4_xattr_check_block(bh)) {
+	if (ext4_xattr_check_block(inode, bh)) {
 		EXT4_ERROR_INODE(inode, "bad block %llu",
 				 EXT4_I(inode)->i_file_acl);
 		error = -EIO;