@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path. Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+DEFINE_MUTEX(iprune_mutex);
 
 /*
  * Statistics gathering..
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
 	int busy;
 	LIST_HEAD(throw_away);
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	return busy;
 }
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
 	if (sb) {
 		/*
 		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
+		 * read mutex so the filesystem cannot go away
 		 * under us (->put_super runs with the write lock
 		 * hold).
 		 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&freeable);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	if (current_is_kswapd())
 		mod_page_state(kswapd_inodesteal, reap);
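
The conversion above follows the standard semaphore-to-mutex pattern: the binary semaphore created with DECLARE_MUTEX() and taken with down()/up() becomes a real mutex defined with DEFINE_MUTEX() and taken with mutex_lock()/mutex_unlock(). A minimal sketch of that pattern, assuming a kernel build context with <linux/mutex.h>; example_mutex and example_shrink_path are hypothetical names used for illustration and are not part of this patch:

	#include <linux/mutex.h>

	/* Hypothetical lock playing the role iprune_mutex plays in the patch:
	 * it serializes a shrinking path against an unmount-style path. */
	static DEFINE_MUTEX(example_mutex);

	static void example_shrink_path(void)
	{
		mutex_lock(&example_mutex);	/* was down(&sem) before the conversion */
		/* ... work that may sleep while the other path is excluded ... */
		mutex_unlock(&example_mutex);	/* was up(&sem) */
	}

Because the lock is only ever used for mutual exclusion and is released by the same task that acquired it, it fits the stricter mutex semantics and gains the mutex debugging checks in exchange.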