@@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         }
 #endif
 
+        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+                debug_locks_off();
+                printk(KERN_ERR
+                        "BUG: looking up invalid subclass: %u\n", subclass);
+                printk(KERN_ERR
+                        "turning off the locking correctness validator.\n");
+                dump_stack();
+                return NULL;
+        }
+
         /*
          * Static locks do not have their class-keys yet - for them the key
          * is the lock object itself:
@@ -774,7 +784,9 @@ out_unlock_set:
         raw_local_irq_restore(flags);
 
         if (!subclass || force)
-                lock->class_cache = class;
+                lock->class_cache[0] = class;
+        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+                lock->class_cache[subclass] = class;
 
         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                 return NULL;
@@ -2679,7 +2691,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass)
 {
-        lock->class_cache = NULL;
+        int i;
+
+        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+                lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
         lock->cpu = raw_smp_processor_id();
 #endif
@@ -2739,21 +2755,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                 return 0;
 
-        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
-                debug_locks_off();
-                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
-                printk("turning off the locking correctness validator.\n");
-                dump_stack();
-                return 0;
-        }
-
         if (lock->key == &__lockdep_no_validate__)
                 check = 1;
 
-        if (!subclass)
-                class = lock->class_cache;
+        if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+                class = lock->class_cache[subclass];
         /*
-         * Not cached yet or subclass?
+         * Not cached?
          */
         if (unlikely(!class)) {
                 class = register_lock_class(lock, subclass, 0);
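
The hunks above carry the whole caching scheme: lockdep_init_map() clears every slot, register_lock_class() fills slot 0 for subclass 0 (or a forced lookup) and a per-subclass slot for shallow subclasses, and __lock_acquire() consults the slot before falling back to the slow registration path. The following is a minimal standalone sketch of that flow, not kernel code: NR_LOCKDEP_CACHING_CLASSES and the slot policy come from the patch, while the struct, the function names and the stubbed slow path are illustrative only (a small cache size of 2 is assumed).

#include <stddef.h>

#define NR_LOCKDEP_CACHING_CLASSES 2    /* value assumed for illustration */

struct lock_class { unsigned int subclass; };

struct map_sketch {
        struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
};

/* lockdep_init_map(): every cache slot starts out empty. */
static void init_map(struct map_sketch *lock)
{
        int i;

        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                lock->class_cache[i] = NULL;
}

/* register_lock_class(): subclass 0 (or a forced lookup) lands in slot 0,
 * other shallow subclasses get their own slot, deeper ones stay uncached. */
static void cache_class(struct map_sketch *lock, struct lock_class *class,
                        unsigned int subclass, int force)
{
        if (!subclass || force)
                lock->class_cache[0] = class;
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                lock->class_cache[subclass] = class;
}

/* Stand-in for register_lock_class(lock, subclass, 0) on a cache miss. */
static struct lock_class *slow_path(struct map_sketch *lock,
                                    unsigned int subclass)
{
        (void)lock;
        (void)subclass;
        return NULL;
}

/* __lock_acquire(): try the per-subclass slot first, register on a miss. */
static struct lock_class *acquire_class(struct map_sketch *lock,
                                        unsigned int subclass)
{
        struct lock_class *class = NULL;

        if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                class = lock->class_cache[subclass];

        if (!class)
                class = slow_path(lock, subclass);

        return class;
}

Slot 0 keeps its special role: the match_held_lock() hunk below continues to read lock->class_cache[0] directly for the common subclass-0 case.
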
@@ -2918,7 +2926,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                 return 1;
 
         if (hlock->references) {
-                struct lock_class *class = lock->class_cache;
+                struct lock_class *class = lock->class_cache[0];
 
                 if (!class)
                         class = look_up_lock_class(lock, 0);
@@ -3559,7 +3567,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                 if (list_empty(head))
                         continue;
                 list_for_each_entry_safe(class, next, head, hash_entry) {
-                        if (unlikely(class == lock->class_cache)) {
+                        int match = 0;
+
+                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+                                match |= class == lock->class_cache[j];
+
+                        if (unlikely(match)) {
                                 if (debug_locks_off_graph_unlock())
                                         WARN_ON(1);
                                 goto out_restore;
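
The lockdep_reset_lock() hunk above makes the invalidation side match the new layout: a class being zapped has to be compared against every cache slot of the map, not a single pointer. A tiny self-contained sketch of that check, with the same illustrative names as before around the NR_LOCKDEP_CACHING_CLASSES constant from the patch:

#define NR_LOCKDEP_CACHING_CLASSES 2    /* assumed, as in the sketch above */

struct lock_class;

struct map_sketch {
        struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
};

/* lockdep_reset_lock(): a class matches the map if it occupies any slot. */
static int class_is_cached(const struct map_sketch *lock,
                           const struct lock_class *class)
{
        int j, match = 0;

        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
                match |= (class == lock->class_cache[j]);

        return match;
}
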
@@ -3775,7 +3788,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
  * Careful: only use this function if you are sure that
  * the task cannot run in parallel!
  */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
         if (unlikely(!debug_locks)) {
                 printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3796,6 @@ void __debug_show_held_locks(struct task_struct *task)
         }
         lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-        __debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
|