@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
 	int cpu;
 	int flush = 0;
 
+	if (unlikely(!vmap_initialized))
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
 		INIT_LIST_HEAD(&vbq->dirty);
 		vbq->nr_dirty = 0;
 	}
+
+	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
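The change above is an "initialized" latch: vmalloc_init() publishes vmap_initialized = true only after the per-CPU vmap_block_queue structures are fully set up, and vm_unmap_aliases() returns early when called before that point rather than walking uninitialized queues. Below is a minimal, self-contained user-space sketch of the same guard pattern; the demo_* names are hypothetical illustrations of the ordering, not kernel APIs.

/*
 * Sketch of the initialization-flag guard used by the patch above.
 * The pattern: set the flag as the last step of init, and check it at
 * the top of every entry point that may legally be called early.
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_initialized = false;	/* analogous to vmap_initialized */

static void demo_flush(void)
{
	/* Analogous to vm_unmap_aliases(): bail out if called too early. */
	if (!demo_initialized) {
		printf("demo_flush: not initialized yet, returning\n");
		return;
	}
	printf("demo_flush: walking per-CPU state\n");
}

static void demo_init(void)
{
	/* ... set up queues, locks, etc. ... */
	demo_initialized = true;	/* publish only after setup completes */
}

int main(void)
{
	demo_flush();	/* early call: harmless no-op */
	demo_init();
	demo_flush();	/* normal call: does real work */
	return 0;
}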