@@ -14,6 +14,7 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -473,7 +474,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
 {
-	static DEFINE_SPINLOCK(purge_lock);
+	static DEFINE_MUTEX(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
 	int nr = 0;
@@ -484,10 +485,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	 * the case that isn't actually used at the moment anyway.
 	 */
 	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
+		if (!mutex_trylock(&purge_lock))
 			return;
 	} else
-		spin_lock(&purge_lock);
+		mutex_lock(&purge_lock);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -519,7 +520,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 		__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
-	spin_unlock(&purge_lock);
+	mutex_unlock(&purge_lock);
 }
 
 /*
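The locking shape the patch preserves is worth calling out: lazy callers (!sync && !force_flush) only purge opportunistically and back off if purge_lock is already held, while sync or force_flush callers block until they acquire it. Below is a minimal userspace sketch of that same trylock-or-block pattern, using POSIX mutexes rather than the kernel mutex API; purge_demo_lock and purge_demo() are invented names for illustration and are not part of the patch.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for purge_lock; not the kernel object. */
static pthread_mutex_t purge_demo_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Same control flow as the hunk above:
 *  - non-sync, non-forced callers purge only if the lock is free;
 *  - sync or force_flush callers wait for it.
 */
static void purge_demo(bool sync, bool force_flush)
{
	if (!sync && !force_flush) {
		if (pthread_mutex_trylock(&purge_demo_lock) != 0)
			return;		/* someone else is already purging */
	} else {
		pthread_mutex_lock(&purge_demo_lock);
	}

	printf("purging (sync=%d force_flush=%d)\n", sync, force_flush);

	pthread_mutex_unlock(&purge_demo_lock);
}

int main(void)
{
	purge_demo(false, false);	/* opportunistic: may bail out */
	purge_demo(true, false);	/* blocking: always purges */
	return 0;
}
```

The switch from DEFINE_SPINLOCK to DEFINE_MUTEX presumably allows the purge path to sleep while holding purge_lock; the trylock-or-block control flow itself is unchanged by the patch.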