@@ -805,7 +805,8 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
-			    struct list_head *head, unsigned long ip, int distance)
+			    struct list_head *head, unsigned long ip,
+			    int distance, struct stack_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -816,11 +817,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	if (!entry)
 		return 0;
 
-	if (!save_trace(&entry->trace))
-		return 0;
-
 	entry->class = this;
 	entry->distance = distance;
+	entry->trace = *trace;
 	/*
 	 * Since we never remove from the dependency list, the list can
 	 * be walked lockless by other CPUs, it's only allocation
@@ -1622,12 +1621,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance)
+	       struct held_lock *next, int distance, int trylock_loop)
 {
 	struct lock_list *entry;
 	int ret;
 	struct lock_list this;
 	struct lock_list *uninitialized_var(target_entry);
+	/*
+	 * Static variable, serialized by the graph_lock().
+	 *
+	 * We use this static variable to save the stack trace in case
+	 * we call into this function multiple times due to encountering
+	 * trylocks in the held lock stack.
+	 */
+	static struct stack_trace trace;
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1675,20 +1682,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
+	if (!trylock_loop && !save_trace(&trace))
+		return 0;
+
 	/*
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
 			       &hlock_class(prev)->locks_after,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
 			       &hlock_class(next)->locks_before,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 	if (!ret)
 		return 0;
 
@@ -1718,6 +1728,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
+	int trylock_loop = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1743,7 +1754,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			if (!check_prev_add(curr, hlock, next, distance))
+			if (!check_prev_add(curr, hlock, next,
+						distance, trylock_loop))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1766,6 +1778,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
+		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
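
The net effect is that save_trace() is paid at most once per check_prevs_add()
walk instead of once per dependency entry: check_prev_add() captures the trace
on the first call (trylock_loop == 0) into a static buffer serialized by the
graph_lock(), and add_lock_to_list() merely copies the cached result. Below is
a minimal user-space sketch of that caching pattern -- the names (struct trace,
save_trace(), add_dependency()) are hypothetical stand-ins, a pthread mutex
plays the role of the graph_lock(), and this is an illustration of the idea,
not kernel code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct stack_trace. */
struct trace {
	char buf[64];
	int nr_entries;
};

/* Stand-in for the graph_lock() serialization. */
static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for save_trace(): pretend this capture is expensive. */
static int save_trace(struct trace *t)
{
	snprintf(t->buf, sizeof(t->buf), "trace@%p", (void *)t);
	t->nr_entries = 1;
	return 1;
}

/*
 * Analogue of check_prev_add(): the static cache is serialized by
 * graph_lock, and only the first call in a loop pays for the capture.
 */
static int add_dependency(int trylock_loop, const char *name)
{
	static struct trace trace;

	if (!trylock_loop && !save_trace(&trace))
		return 0;

	/* add_lock_to_list() analogue: copy the cached trace, never re-capture. */
	printf("dep %s reuses cached %s\n", name, trace.buf);
	return 1;
}

int main(void)
{
	const char *deps[] = { "A->B", "A->C", "A->D" };
	int trylock_loop = 0;
	int i;

	pthread_mutex_lock(&graph_lock);
	for (i = 0; i < 3; i++) {
		if (!add_dependency(trylock_loop, deps[i]))
			break;
		trylock_loop = 1;	/* later iterations reuse the trace */
	}
	pthread_mutex_unlock(&graph_lock);
	return 0;
}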