lglock.h

/*
 * Specialised local-global spinlock. Can only be declared as global variables
 * to avoid overhead and keep things simple (and we don't want to start using
 * these inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name)	name##_lock_init()
#define br_read_lock(name)	name##_local_lock()
#define br_read_unlock(name)	name##_local_unlock()
#define br_write_lock(name)	name##_global_lock()
#define br_write_unlock(name)	name##_global_unlock()

#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)

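/*
 * Example usage sketch of the brlock wrappers above. The lock name
 * "my_brlock" and the surrounding functions are hypothetical, for
 * illustration only:
 *
 *	DEFINE_BRLOCK(my_brlock);
 *
 *	In initialisation code, once, before first use:
 *		br_lock_init(my_brlock);
 *
 *	Read side (fast and scalable, only this CPU's spinlock is taken):
 *		br_read_lock(my_brlock);
 *		... read the protected data ...
 *		br_read_unlock(my_brlock);
 *
 *	Write side (very slow, every CPU's spinlock is taken):
 *		br_write_lock(my_brlock);
 *		... modify the protected data ...
 *		br_write_unlock(my_brlock);
 */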

#define lg_lock_init(name)	name##_lock_init()
#define lg_local_lock(name)	name##_local_lock()
#define lg_local_unlock(name)	name##_local_unlock()
#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
#define lg_global_lock(name)	name##_global_lock()
#define lg_global_unlock(name)	name##_global_unlock()

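/*
 * Example usage sketch of the lg_* wrappers above. The lock name "my_lglock"
 * and the calling context are hypothetical, for illustration only:
 *
 *	DECLARE_LGLOCK(my_lglock);	in a shared header
 *	DEFINE_LGLOCK(my_lglock);	in exactly one .c file
 *
 *	In initialisation code, once, before first use:
 *		lg_lock_init(my_lglock);
 *
 *	Fast path, protecting this CPU's data:
 *		lg_local_lock(my_lglock);
 *		... access this CPU's data ...
 *		lg_local_unlock(my_lglock);
 *
 *	Accessing another CPU's data (may contend with that CPU):
 *		lg_local_lock_cpu(my_lglock, cpu);
 *		... access the data of "cpu" ...
 *		lg_local_unlock_cpu(my_lglock, cpu);
 *
 *	Slow path, excluding all CPUs at once:
 *		lg_global_lock(my_lglock);
 *		... access all per-CPU data ...
 *		lg_global_unlock(my_lglock);
 */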

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP	lockdep_init_map

#define DEFINE_LGLOCK_LOCKDEP(name) \
struct lock_class_key name##_lock_key; \
struct lockdep_map name##_lock_dep_map; \
EXPORT_SYMBOL(name##_lock_dep_map)

#else
#define LOCKDEP_INIT_MAP(a, b, c, d)

#define DEFINE_LGLOCK_LOCKDEP(name)
#endif

#define DECLARE_LGLOCK(name) \
extern void name##_lock_init(void); \
extern void name##_local_lock(void); \
extern void name##_local_unlock(void); \
extern void name##_local_lock_cpu(int cpu); \
extern void name##_local_unlock_cpu(int cpu); \
extern void name##_global_lock(void); \
extern void name##_global_unlock(void);

#define DEFINE_LGLOCK(name) \
 \
DEFINE_SPINLOCK(name##_cpu_lock); \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
 \
/* set every possible CPU's spinlock to the unlocked state */ \
void name##_lock_init(void) { \
	int i; \
	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
	for_each_possible_cpu(i) { \
		arch_spinlock_t *lock; \
		lock = &per_cpu(name##_lock, i); \
		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
	} \
} \
EXPORT_SYMBOL(name##_lock_init); \
 \
/* take/release only this CPU's spinlock; preemption stays disabled while held */ \
void name##_local_lock(void) { \
	arch_spinlock_t *lock; \
	preempt_disable(); \
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
	lock = &__get_cpu_var(name##_lock); \
	arch_spin_lock(lock); \
} \
EXPORT_SYMBOL(name##_local_lock); \
 \
void name##_local_unlock(void) { \
	arch_spinlock_t *lock; \
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
	lock = &__get_cpu_var(name##_lock); \
	arch_spin_unlock(lock); \
	preempt_enable(); \
} \
EXPORT_SYMBOL(name##_local_unlock); \
 \
/* same as the local variants, but operating on an explicit CPU's spinlock */ \
void name##_local_lock_cpu(int cpu) { \
	arch_spinlock_t *lock; \
	preempt_disable(); \
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
	lock = &per_cpu(name##_lock, cpu); \
	arch_spin_lock(lock); \
} \
EXPORT_SYMBOL(name##_local_lock_cpu); \
 \
void name##_local_unlock_cpu(int cpu) { \
	arch_spinlock_t *lock; \
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
	lock = &per_cpu(name##_lock, cpu); \
	arch_spin_unlock(lock); \
	preempt_enable(); \
} \
EXPORT_SYMBOL(name##_local_unlock_cpu); \
 \
/* exclude everyone: take every possible CPU's spinlock, in CPU order */ \
void name##_global_lock(void) { \
	int i; \
	preempt_disable(); \
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
	for_each_possible_cpu(i) { \
		arch_spinlock_t *lock; \
		lock = &per_cpu(name##_lock, i); \
		arch_spin_lock(lock); \
	} \
} \
EXPORT_SYMBOL(name##_global_lock); \
 \
void name##_global_unlock(void) { \
	int i; \
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
	for_each_possible_cpu(i) { \
		arch_spinlock_t *lock; \
		lock = &per_cpu(name##_lock, i); \
		arch_spin_unlock(lock); \
	} \
	preempt_enable(); \
} \
EXPORT_SYMBOL(name##_global_unlock);
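/*
 * Note on the global path: name##_global_lock() walks the per-CPU locks in
 * for_each_possible_cpu() order, so two concurrent global lockers acquire
 * them in the same order and cannot deadlock against each other, while a
 * local locker only contends on its own CPU's spinlock.
 */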
#endif