|
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
|
|
|
call_rcu(&inode->i_rcu, i_callback);
|
|
|
}
|
|
|
|
|
|
+/*
+ * address_space_init_once - one-time initialization of an address_space.
+ *
+ * Zeroes @mapping, then initializes the fields that are idempotent
+ * across reuse: the page-cache radix tree, the tree_lock / i_mmap_lock /
+ * private_lock spinlocks, the private and nonlinear lists, the i_mmap
+ * prio-tree root, and the unmap_mutex.  Exported (EXPORT_SYMBOL) so
+ * code outside this file can initialize an embedded address_space.
+ */
+void address_space_init_once(struct address_space *mapping)
|
|
|
+{
|
|
|
+ memset(mapping, 0, sizeof(*mapping));
|
|
|
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
|
|
|
+ spin_lock_init(&mapping->tree_lock);
|
|
|
+ spin_lock_init(&mapping->i_mmap_lock);
|
|
|
+ INIT_LIST_HEAD(&mapping->private_list);
|
|
|
+ spin_lock_init(&mapping->private_lock);
|
|
|
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
|
|
|
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
|
|
|
+ mutex_init(&mapping->unmap_mutex);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(address_space_init_once);
|
|
|
+
|
|
|
/*
|
|
|
* These are initializations that only need to be done
|
|
|
* once, because the fields are idempotent across use
|
|
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
|
|
|
INIT_LIST_HEAD(&inode->i_devices);
|
|
|
INIT_LIST_HEAD(&inode->i_wb_list);
|
|
|
INIT_LIST_HEAD(&inode->i_lru);
|
|
|
- INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
|
|
|
- spin_lock_init(&inode->i_data.tree_lock);
|
|
|
- spin_lock_init(&inode->i_data.i_mmap_lock);
|
|
|
- INIT_LIST_HEAD(&inode->i_data.private_list);
|
|
|
- spin_lock_init(&inode->i_data.private_lock);
|
|
|
- INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
|
|
|
- INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
|
|
|
+ address_space_init_once(&inode->i_data);
|
|
|
i_size_ordered_init(inode);
|
|
|
#ifdef CONFIG_FSNOTIFY
|
|
|
INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
|