@@ -797,9 +797,13 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	unsigned long flags;
 	struct early_log *log;
 
+	if (atomic_read(&kmemleak_error)) {
+		/* kmemleak stopped recording, just count the requests */
+		crt_early_log++;
+		return;
+	}
+
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		pr_warning("Early log buffer exceeded, "
-			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
 		kmemleak_disable();
 		return;
 	}
@@ -1634,7 +1638,6 @@ static void kmemleak_disable(void)
 		return;
 
 	/* stop any memory operation tracing */
-	atomic_set(&kmemleak_early_log, 0);
 	atomic_set(&kmemleak_enabled, 0);
 
 	/* check whether it is too early for a kernel thread */
@@ -1694,12 +1697,18 @@ void __init kmemleak_init(void)
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 	INIT_PRIO_TREE_ROOT(&object_tree_root);
 
+	if (crt_early_log >= ARRAY_SIZE(early_log))
+		pr_warning("Early log buffer exceeded (%d), please increase "
+			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
+
 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
 	local_irq_save(flags);
-	if (!atomic_read(&kmemleak_error)) {
+	atomic_set(&kmemleak_early_log, 0);
+	if (atomic_read(&kmemleak_error)) {
+		local_irq_restore(flags);
+		return;
+	} else
 		atomic_set(&kmemleak_enabled, 1);
-		atomic_set(&kmemleak_early_log, 0);
-	}
 	local_irq_restore(flags);
 
 	/*