/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
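/*
 * Illustrative sketch (not part of the original file): typical legacy
 * use of the BKL through the lock_kernel()/unlock_kernel() wrappers
 * from <linux/smp_lock.h>, which are assumed to pass
 * __func__/__FILE__/__LINE__ down to _lock_kernel()/_unlock_kernel()
 * defined later in this file. The driver function and its helper are
 * hypothetical; they only show the recursive take/release pattern the
 * comment above describes.
 */
#if 0	/* example only */
static long example_legacy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	long ret;

	lock_kernel();				/* may sleep; nesting is allowed */
	ret = example_handle_cmd(cmd, arg);	/* hypothetical helper */
	unlock_kernel();			/* really released at depth 0 only */
	return ret;
}
#endif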
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!do_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			do_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (raw_spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!do_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	do_raw_spin_lock(&kernel_flag);
}
#endif
static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		might_sleep();
		__lock_kernel();
	}
	current->lock_depth = depth;
}
void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
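/*
 * Illustrative sketch (not part of the original file): the wrapper
 * macros in <linux/smp_lock.h> that legacy callers are assumed to use,
 * matching the examples above. kernel_locked() simply tests the
 * per-task recursion depth that _lock_kernel()/_unlock_kernel()
 * maintain (-1 means "not held by current").
 */
#if 0	/* example only */
#define lock_kernel()	do { _lock_kernel(__func__, __FILE__, __LINE__); } while (0)
#define unlock_kernel()	do { _unlock_kernel(__func__, __FILE__, __LINE__); } while (0)
#define kernel_locked()	(current->lock_depth >= 0)
#endif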