|
@@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
/*
 * Re-annotate an already-held lock with a new subclass.
 *
 * Walks the current task's held-lock stack from the top looking for
 * @lock (without crossing into a different irq context), switches the
 * matching held_lock over to the lock class registered for @subclass,
 * then rewinds the stack to that point and re-acquires every popped
 * lock so the dependency chain/chain-key is rebuilt against the new
 * class.
 *
 * Returns 1 on success, 0 if a debug check fired or a re-acquisition
 * failed. If @lock is not found among the held locks, reports via
 * print_unlock_inbalance_bug() and returns its result.
 */
static int
__lock_set_subclass(struct lockdep_map *lock,
		    unsigned int subclass, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	depth = curr->lockdep_depth;
	/* The caller must actually hold at least one lock. */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	/* Top-down scan of the held-lock stack for the given map. */
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	/* @lock is not held (or is in another irq context): complain. */
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	/* Switch this held lock to the class for the new subclass. */
	class = register_lock_class(lock, subclass, 0);
	hlock->class = class;

	/*
	 * Rewind the stack to just below the found lock; prev_chain_key
	 * restores the chain key as it was before this lock was taken.
	 */
	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	/* Re-acquire the found lock and everything that was above it. */
	for (; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
			hlock->class->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->acquire_ip))
			return 0;
	}

	/* Re-acquisition must restore exactly the original depth. */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}
|
|
|
+
|
|
|
/*
|
|
|
* Remove the lock to the list of currently held locks in a
|
|
|
* potentially non-nested (out of order) manner. This is a
|
|
@@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags)
|
|
|
#endif
|
|
|
}
|
|
|
|
|
|
/*
 * Public entry point for changing a held lock's subclass annotation.
 *
 * Wraps __lock_set_subclass() in the lockdep entry protocol visible
 * here: bail out on recursive entry via the per-task lockdep_recursion
 * flag, disable interrupts around the update, and validate the saved
 * flags with check_flags(). On success the held-lock chain key is
 * re-verified with check_chain_key().
 */
void
lock_set_subclass(struct lockdep_map *lock,
		  unsigned int subclass, unsigned long ip)
{
	unsigned long flags;

	/* Prevent lockdep from recursing into itself. */
	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	/* Set before doing any lockdep work so nested entry is refused. */
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_set_subclass(lock, subclass, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(lock_set_subclass);
|
|
|
+
|
|
|
/*
|
|
|
* We are not always called with irqs disabled - do that here,
|
|
|
* and also avoid lockdep recursion:
|