/* lglock.h */
/*
 * Specialised local-global spinlock. Can only be declared as global variables
 * to avoid overhead and keep things simple (and we don't want to start using
 * these inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
  25. /* can make br locks by using local lock for read side, global lock for write */
  26. #define br_lock_init(name) name##_lock_init()
  27. #define br_read_lock(name) name##_local_lock()
  28. #define br_read_unlock(name) name##_local_unlock()
  29. #define br_write_lock(name) name##_global_lock_online()
  30. #define br_write_unlock(name) name##_global_unlock_online()
  31. #define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
  32. #define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
  33. #define lg_lock_init(name) name##_lock_init()
  34. #define lg_local_lock(name) name##_local_lock()
  35. #define lg_local_unlock(name) name##_local_unlock()
  36. #define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu)
  37. #define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
  38. #define lg_global_lock(name) name##_global_lock()
  39. #define lg_global_unlock(name) name##_global_unlock()
  40. #define lg_global_lock_online(name) name##_global_lock_online()
  41. #define lg_global_unlock_online(name) name##_global_unlock_online()
  42. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  43. #define LOCKDEP_INIT_MAP lockdep_init_map
  44. #define DEFINE_LGLOCK_LOCKDEP(name) \
  45. struct lock_class_key name##_lock_key; \
  46. struct lockdep_map name##_lock_dep_map; \
  47. EXPORT_SYMBOL(name##_lock_dep_map)
  48. #else
  49. #define LOCKDEP_INIT_MAP(a, b, c, d)
  50. #define DEFINE_LGLOCK_LOCKDEP(name)
  51. #endif
  52. #define DECLARE_LGLOCK(name) \
  53. extern void name##_lock_init(void); \
  54. extern void name##_local_lock(void); \
  55. extern void name##_local_unlock(void); \
  56. extern void name##_local_lock_cpu(int cpu); \
  57. extern void name##_local_unlock_cpu(int cpu); \
  58. extern void name##_global_lock(void); \
  59. extern void name##_global_unlock(void); \
  60. extern void name##_global_lock_online(void); \
  61. extern void name##_global_unlock_online(void); \
  62. #define DEFINE_LGLOCK(name) \
  63. \
  64. DEFINE_SPINLOCK(name##_cpu_lock); \
  65. cpumask_t name##_cpus __read_mostly; \
  66. DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
  67. DEFINE_LGLOCK_LOCKDEP(name); \
  68. \
  69. static int \
  70. name##_lg_cpu_callback(struct notifier_block *nb, \
  71. unsigned long action, void *hcpu) \
  72. { \
  73. switch (action & ~CPU_TASKS_FROZEN) { \
  74. case CPU_UP_PREPARE: \
  75. spin_lock(&name##_cpu_lock); \
  76. cpu_set((unsigned long)hcpu, name##_cpus); \
  77. spin_unlock(&name##_cpu_lock); \
  78. break; \
  79. case CPU_UP_CANCELED: case CPU_DEAD: \
  80. spin_lock(&name##_cpu_lock); \
  81. cpu_clear((unsigned long)hcpu, name##_cpus); \
  82. spin_unlock(&name##_cpu_lock); \
  83. } \
  84. return NOTIFY_OK; \
  85. } \
  86. static struct notifier_block name##_lg_cpu_notifier = { \
  87. .notifier_call = name##_lg_cpu_callback, \
  88. }; \
  89. void name##_lock_init(void) { \
  90. int i; \
  91. LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
  92. for_each_possible_cpu(i) { \
  93. arch_spinlock_t *lock; \
  94. lock = &per_cpu(name##_lock, i); \
  95. *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
  96. } \
  97. register_hotcpu_notifier(&name##_lg_cpu_notifier); \
  98. get_online_cpus(); \
  99. for_each_online_cpu(i) \
  100. cpu_set(i, name##_cpus); \
  101. put_online_cpus(); \
  102. } \
  103. EXPORT_SYMBOL(name##_lock_init); \
  104. \
  105. void name##_local_lock(void) { \
  106. arch_spinlock_t *lock; \
  107. preempt_disable(); \
  108. rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
  109. lock = &__get_cpu_var(name##_lock); \
  110. arch_spin_lock(lock); \
  111. } \
  112. EXPORT_SYMBOL(name##_local_lock); \
  113. \
  114. void name##_local_unlock(void) { \
  115. arch_spinlock_t *lock; \
  116. rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
  117. lock = &__get_cpu_var(name##_lock); \
  118. arch_spin_unlock(lock); \
  119. preempt_enable(); \
  120. } \
  121. EXPORT_SYMBOL(name##_local_unlock); \
  122. \
  123. void name##_local_lock_cpu(int cpu) { \
  124. arch_spinlock_t *lock; \
  125. preempt_disable(); \
  126. rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
  127. lock = &per_cpu(name##_lock, cpu); \
  128. arch_spin_lock(lock); \
  129. } \
  130. EXPORT_SYMBOL(name##_local_lock_cpu); \
  131. \
  132. void name##_local_unlock_cpu(int cpu) { \
  133. arch_spinlock_t *lock; \
  134. rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
  135. lock = &per_cpu(name##_lock, cpu); \
  136. arch_spin_unlock(lock); \
  137. preempt_enable(); \
  138. } \
  139. EXPORT_SYMBOL(name##_local_unlock_cpu); \
  140. \
  141. void name##_global_lock_online(void) { \
  142. int i; \
  143. spin_lock(&name##_cpu_lock); \
  144. rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
  145. for_each_cpu(i, &name##_cpus) { \
  146. arch_spinlock_t *lock; \
  147. lock = &per_cpu(name##_lock, i); \
  148. arch_spin_lock(lock); \
  149. } \
  150. } \
  151. EXPORT_SYMBOL(name##_global_lock_online); \
  152. \
  153. void name##_global_unlock_online(void) { \
  154. int i; \
  155. rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
  156. for_each_cpu(i, &name##_cpus) { \
  157. arch_spinlock_t *lock; \
  158. lock = &per_cpu(name##_lock, i); \
  159. arch_spin_unlock(lock); \
  160. } \
  161. spin_unlock(&name##_cpu_lock); \
  162. } \
  163. EXPORT_SYMBOL(name##_global_unlock_online); \
  164. \
  165. void name##_global_lock(void) { \
  166. int i; \
  167. preempt_disable(); \
  168. rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
  169. for_each_possible_cpu(i) { \
  170. arch_spinlock_t *lock; \
  171. lock = &per_cpu(name##_lock, i); \
  172. arch_spin_lock(lock); \
  173. } \
  174. } \
  175. EXPORT_SYMBOL(name##_global_lock); \
  176. \
  177. void name##_global_unlock(void) { \
  178. int i; \
  179. rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
  180. for_each_possible_cpu(i) { \
  181. arch_spinlock_t *lock; \
  182. lock = &per_cpu(name##_lock, i); \
  183. arch_spin_unlock(lock); \
  184. } \
  185. preempt_enable(); \
  186. } \
  187. EXPORT_SYMBOL(name##_global_unlock);
  188. #endif