/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);
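
/*
 * Historical note: in kernels of this era, DECLARE_MUTEX() declared a
 * counting semaphore initialized to 1, not a struct mutex. A rough
 * equivalent - a sketch only, the exact initializer macro is assumed from
 * the semaphore API of the time - would be:
 *
 *	static struct semaphore kernel_sem =
 *		__SEMAPHORE_INITIALIZER(kernel_sem, 1);
 *
 * With a count of 1, the first down(&kernel_sem) succeeds immediately and
 * any further down() sleeps until the matching up(&kernel_sem).
 */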

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	struct task_struct *task = current;
	int saved_lock_depth = task->lock_depth;

	BUG_ON(saved_lock_depth < 0);

	task->lock_depth = -1;		/* hide the BKL depth from schedule() to avoid recursion */
	preempt_enable_no_resched();

	down(&kernel_sem);		/* may sleep; preemption is enabled again at this point */

	preempt_disable();
	task->lock_depth = saved_lock_depth;

	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	up(&kernel_sem);
}
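
/*
 * How these two hooks are used: the scheduler drops the BKL across a
 * context switch and takes it back afterwards, keyed off tsk->lock_depth.
 * A sketch of the calling wrappers, modelled on the smp_lock.h helpers of
 * this era (details are assumed; see include/linux/smp_lock.h):
 *
 *	#define release_kernel_lock(tsk) do {			\
 *		if (unlikely((tsk)->lock_depth >= 0))		\
 *			__release_kernel_lock();		\
 *	} while (0)
 *
 *	static inline int reacquire_kernel_lock(struct task_struct *task)
 *	{
 *		if (unlikely(task->lock_depth >= 0))
 *			return __reacquire_kernel_lock();
 *		return 0;
 *	}
 *
 * schedule() calls release_kernel_lock() before switching away and
 * reacquire_kernel_lock() once the task is chosen to run again, which is
 * what makes the BKL "transparently dropped and reacquired over schedule()".
 */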

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
	struct task_struct *task = current;
	int depth = task->lock_depth + 1;

	if (likely(!depth))
		/*
		 * No recursion worries - we set up lock_depth _after_
		 */
		down(&kernel_sem);

	task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	struct task_struct *task = current;

	BUG_ON(task->lock_depth < 0);

	if (likely(--task->lock_depth < 0))
		up(&kernel_sem);
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
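
/*
 * Illustrative use from a legacy code path. The functions below are
 * hypothetical, made up only to show the recursion semantics: because the
 * depth counter lives in the task_struct, nested lock_kernel() calls from
 * the same task are cheap - only the outermost pair touches the semaphore.
 */
#if 0	/* example only, not part of this file */
static long legacy_do_cmd(unsigned int cmd, unsigned long arg);	/* hypothetical helper */

static int legacy_ioctl(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();			/* lock_depth: -1 -> 0, down(&kernel_sem) */
	ret = legacy_do_cmd(cmd, arg);	/* may call lock_kernel() again:
					 * depth 0 -> 1, no second down() */
	unlock_kernel();		/* lock_depth: 0 -> -1, up(&kernel_sem) */
	return ret;
}
#endif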