kernel_lock.c

/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
        defined(CONFIG_DEBUG_PREEMPT)

/*
 * Debugging check.
 */
unsigned int smp_processor_id(void)
{
        unsigned long preempt_count = preempt_count();
        int this_cpu = __smp_processor_id();
        cpumask_t this_mask;

        if (likely(preempt_count))
                goto out;

        if (irqs_disabled())
                goto out;

        /*
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
        this_mask = cpumask_of_cpu(this_cpu);

        if (cpus_equal(current->cpus_allowed, this_mask))
                goto out;

        /*
         * It is valid to assume CPU-locality during early bootup:
         */
        if (system_state != SYSTEM_RUNNING)
                goto out;

        /*
         * Avoid recursion:
         */
        preempt_disable();

        if (!printk_ratelimit())
                goto out_enable;

        printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n",
                preempt_count(), current->comm, current->pid);
        print_symbol("caller is %s\n", (long)__builtin_return_address(0));
        dump_stack();

out_enable:
        preempt_enable_no_resched();
out:
        return this_cpu;
}

EXPORT_SYMBOL(smp_processor_id);

#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
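
/*
 * Usage note (an illustrative addition, not part of the original file):
 * the check above fires when smp_processor_id() is called from
 * preemptible context, where the task may migrate to another CPU
 * between obtaining the id and using it. The usual fix is to pin the
 * task with get_cpu()/put_cpu() for as long as the CPU number is in
 * use; get_cpu() disables preemption and put_cpu() re-enables it.
 * this_cpu_data below is a hypothetical per-CPU array, used only for
 * the example:
 *
 *	int cpu = get_cpu();
 *	this_cpu_data[cpu]++;
 *	put_cpu();
 */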

#ifdef CONFIG_PREEMPT_BKL

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        struct task_struct *task = current;
        int saved_lock_depth = task->lock_depth;

        BUG_ON(saved_lock_depth < 0);

        task->lock_depth = -1;
        preempt_enable_no_resched();

        down(&kernel_sem);

        preempt_disable();
        task->lock_depth = saved_lock_depth;

        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        up(&kernel_sem);
}
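
/*
 * Scheduler interaction (an illustrative sketch, not part of the
 * original file): the two hooks above are not called by BKL users
 * directly. The release_kernel_lock()/reacquire_kernel_lock() wrappers
 * in <linux/smp_lock.h> invoke them from schedule(), roughly as below,
 * so a task that sleeps while holding the BKL drops it across the
 * context switch and takes it back, at its saved nesting depth, before
 * it runs again:
 *
 *	asmlinkage void __sched schedule(void)
 *	{
 *		...
 *		release_kernel_lock(prev);
 *			(calls __release_kernel_lock() if prev->lock_depth >= 0)
 *		... pick the next task and context-switch to it ...
 *		reacquire_kernel_lock(current);
 *			(calls __reacquire_kernel_lock() if current->lock_depth >= 0)
 *		...
 *	}
 */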

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
        struct task_struct *task = current;
        int depth = task->lock_depth + 1;

        if (likely(!depth))
                /*
                 * No recursion worries - we set up lock_depth _after_
                 */
                down(&kernel_sem);

        task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        struct task_struct *task = current;

        BUG_ON(task->lock_depth < 0);

        if (likely(--task->lock_depth < 0))
                up(&kernel_sem);
}

#else

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);

/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (test_thread_flag(TIF_NEED_RESCHED))
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        _raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled..
                 */
                do {
                        preempt_enable();
                        while (spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!_raw_spin_trylock(&kernel_flag));
        }
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
        _raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
        int depth = current->lock_depth + 1;

        if (likely(!depth))
                __lock_kernel();
        current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);

        if (likely(--current->lock_depth < 0))
                __unlock_kernel();
}

#endif

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
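
/*
 * Usage sketch (an illustrative addition, not part of the original
 * file): legacy code that still depends on the BKL simply brackets its
 * critical section with lock_kernel()/unlock_kernel(). The lock nests
 * per task via current->lock_depth, so a callee may take it again
 * safely, and it is transparently dropped and reacquired if the holder
 * sleeps in schedule(). The names legacy_dev_ioctl() and
 * do_legacy_work() are hypothetical:
 *
 *	static int legacy_dev_ioctl(struct inode *inode, struct file *filp,
 *				    unsigned int cmd, unsigned long arg)
 *	{
 *		int ret;
 *
 *		lock_kernel();
 *		ret = do_legacy_work(cmd, arg);
 *		unlock_kernel();
 *
 *		return ret;
 *	}
 */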