@@ -31,8 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-bool vmap_lazy_unmap __read_mostly = true;
-
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void)
 {
 	unsigned int log;
 
-	if (!vmap_lazy_unmap)
-		return 0;
-
 	log = fls(num_online_cpus());
 
 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 			if (va->va_end > *end)
 				*end = va->va_end;
 			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-			unmap_vmap_area(va);
 			list_add_tail(&va->purge_list, &valist);
 			va->flags |= VM_LAZY_FREEING;
 			va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
  */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -622,6 +617,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 		try_purge_vmap_area_lazy();
 }
 
+/*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
+}
+
 /*
  * Free and unmap a vmap area
  */
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area_noflush(vb->va);
+	free_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
+	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
 	spin_lock(&vb->lock);
 	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void)
 
 			s = vb->va->va_start + (i << PAGE_SHIFT);
 			e = vb->va->va_start + (j << PAGE_SHIFT);
-			vunmap_page_range(s, e);
 			flush = 1;
 
 			if (s < start)