vfs: fix possible deadlock in ext2, ext3, ext4 when using xattrs

mb_cache_entry_alloc() was allocating cache entries with GFP_KERNEL.  But
filesystems call this function while holding xattr_sem, so possible recursion
into the fs violates the locking ordering of xattr_sem and transaction start /
i_mutex for ext2-4.  Change mb_cache_entry_alloc() so that filesystems can
specify the desired gfp mask, and use GFP_NOFS from all of them.

Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Dave Jones <davej@redhat.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jan Kara 17 years ago
parent
commit
335e92e8a5
5 changed files with 6 additions and 6 deletions
  1. fs/ext2/xattr.c  +1 -1
  2. fs/ext3/xattr.c  +1 -1
  3. fs/ext4/xattr.c  +1 -1
  4. fs/mbcache.c  +2 -2
  5. include/linux/mbcache.h  +1 -1
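
For context, a minimal, purely illustrative sketch of the caller pattern the
commit message describes.  It is not part of the patch: the cache handle
example_xattr_cache and the function example_cache_insert() are hypothetical
names standing in for the real ext2/3/4 *_xattr_cache_insert() callers changed
in the hunks below; mb_cache_entry_free() and mb_cache_entry_release() are the
existing mbcache entry teardown calls.  The point is that the allocation is
made with xattr_sem held, so it must use GFP_NOFS rather than GFP_KERNEL:

/*
 * Illustrative sketch only -- not part of this commit.  "example_" names
 * are hypothetical; the real callers are in the per-file hunks below.
 */
#include <linux/mbcache.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>

static struct mb_cache *example_xattr_cache;	/* hypothetical cache handle */

static int example_cache_insert(struct buffer_head *bh, unsigned int *hash)
{
	struct mb_cache_entry *ce;
	int error;

	/*
	 * Called with xattr_sem held.  A GFP_KERNEL allocation could enter
	 * direct reclaim, recurse back into the filesystem and deadlock on
	 * xattr_sem / transaction start; GFP_NOFS forbids that recursion.
	 */
	ce = mb_cache_entry_alloc(example_xattr_cache, GFP_NOFS);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error)
		mb_cache_entry_free(ce);
	else
		mb_cache_entry_release(ce);
	return error;
}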

fs/ext2/xattr.c  +1 -1

@@ -835,7 +835,7 @@ ext2_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext2_xattr_cache);
+	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
 	if (!ce)
 		return -ENOMEM;
 	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);

fs/ext3/xattr.c  +1 -1

@@ -1126,7 +1126,7 @@ ext3_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext3_xattr_cache);
+	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;

fs/ext4/xattr.c  +1 -1

@@ -1386,7 +1386,7 @@ ext4_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_xattr_cache);
+	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;

fs/mbcache.c  +2 -2

@@ -399,11 +399,11 @@ mb_cache_destroy(struct mb_cache *cache)
  * if no more memory was available.
  */
 struct mb_cache_entry *
-mb_cache_entry_alloc(struct mb_cache *cache)
+mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
 	struct mb_cache_entry *ce;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
+	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
 	if (ce) {
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);

include/linux/mbcache.h  +1 -1

@@ -34,7 +34,7 @@ void mb_cache_destroy(struct mb_cache *);
 
 /* Functions on cache entries */
 
-struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *);
+struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
 int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
 			  sector_t, unsigned int[]);
 void mb_cache_entry_release(struct mb_cache_entry *);