|
@@ -2998,6 +2998,42 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
|
|
|
|
|
|
/*
 * Sentinel class key: locks whose class is keyed by this object are
 * presumably skipped by lockdep validation (NOTE(review): semantics
 * inferred from the name — confirm against lockdep_set_novalidate_class()
 * users; the definition here carries no behavior of its own).
 */
struct lock_class_key __lockdep_no_validate__;
|
|
|
|
|
|
+static int
|
|
|
+print_lock_nested_lock_not_held(struct task_struct *curr,
|
|
|
+ struct held_lock *hlock,
|
|
|
+ unsigned long ip)
|
|
|
+{
|
|
|
+ if (!debug_locks_off())
|
|
|
+ return 0;
|
|
|
+ if (debug_locks_silent)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ printk("\n");
|
|
|
+ printk("==================================\n");
|
|
|
+ printk("[ BUG: Nested lock was not taken ]\n");
|
|
|
+ print_kernel_ident();
|
|
|
+ printk("----------------------------------\n");
|
|
|
+
|
|
|
+ printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
|
|
|
+ print_lock(hlock);
|
|
|
+
|
|
|
+ printk("\nbut this task is not holding:\n");
|
|
|
+ printk("%s\n", hlock->nest_lock->name);
|
|
|
+
|
|
|
+ printk("\nstack backtrace:\n");
|
|
|
+ dump_stack();
|
|
|
+
|
|
|
+ printk("\nother info that might help us debug this:\n");
|
|
|
+ lockdep_print_held_locks(curr);
|
|
|
+
|
|
|
+ printk("\nstack backtrace:\n");
|
|
|
+ dump_stack();
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int __lock_is_held(struct lockdep_map *lock);
|
|
|
+
|
|
|
/*
|
|
|
* This gets called for every mutex_lock*()/spin_lock*() operation.
|
|
|
* We maintain the dependency maps and validate the locking attempt:
|
|
@@ -3139,6 +3175,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
|
|
}
|
|
|
chain_key = iterate_chain_key(chain_key, id);
|
|
|
|
|
|
+ if (nest_lock && !__lock_is_held(nest_lock))
|
|
|
+ return print_lock_nested_lock_not_held(curr, hlock, ip);
|
|
|
+
|
|
|
if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
|
|
|
return 0;
|
|
|
|