
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
        return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
        return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
        return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
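/*
 * Illustrative sketch (not part of the original file): typical
 * caller-side use of the trylock variants via the spin_trylock()
 * wrapper, which returns nonzero on success. "struct my_dev" and
 * its fields are hypothetical, purely for illustration.
 */
#if 0
static void my_dev_poll(struct my_dev *dev)
{
        if (spin_trylock(&dev->lock)) {
                /* got the lock without spinning */
                dev->polls++;
                spin_unlock(&dev->lock);
        }
}
#endif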
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
        __read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
        return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
        __spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
        __spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
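/*
 * For orientation (an assumption about the helpers, which live in
 * <linux/spinlock_api_smp.h>, not in this file): __spin_lock_bh()
 * is roughly equivalent to the sketch below - disable softirqs and
 * preemption, inform lockdep, then take the raw lock.
 */
#if 0
static inline void __spin_lock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
#endif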
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
        return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
        __read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
        __read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
        return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
        __write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
        __write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
        __spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
        __write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
        for (;;) { \
                preempt_disable(); \
                if (likely(_raw_##op##_trylock(lock))) \
                        break; \
                preempt_enable(); \
 \
                if (!(lock)->break_lock) \
                        (lock)->break_lock = 1; \
                while (!op##_can_lock(lock) && (lock)->break_lock) \
                        _raw_##op##_relax(&lock->raw_lock); \
        } \
        (lock)->break_lock = 0; \
} \
 \
EXPORT_SYMBOL(_##op##_lock); \
 \
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        for (;;) { \
                preempt_disable(); \
                local_irq_save(flags); \
                if (likely(_raw_##op##_trylock(lock))) \
                        break; \
                local_irq_restore(flags); \
                preempt_enable(); \
 \
                if (!(lock)->break_lock) \
                        (lock)->break_lock = 1; \
                while (!op##_can_lock(lock) && (lock)->break_lock) \
                        _raw_##op##_relax(&lock->raw_lock); \
        } \
        (lock)->break_lock = 0; \
        return flags; \
} \
 \
EXPORT_SYMBOL(_##op##_lock_irqsave); \
 \
void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
        _##op##_lock_irqsave(lock); \
} \
 \
EXPORT_SYMBOL(_##op##_lock_irq); \
 \
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        /* \
         * Careful: we must exclude softirqs too, hence the \
         * irq-disabling. We use the generic preemption-aware \
         * function: \
         */ \
        flags = _##op##_lock_irqsave(lock); \
        local_bh_disable(); \
        local_irq_restore(flags); \
} \
 \
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
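/*
 * Illustrative sketch (not part of the original file): for
 * orientation, BUILD_LOCK_OPS(spin, spinlock) expands its first
 * function into roughly the following. Note how preemption is
 * re-enabled while this CPU waits for the holder to drop the lock,
 * so the spinner itself stays preemptable.
 */
#if 0
void __lockfunc _spin_lock(spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(_raw_spin_trylock(lock)))
                        break;
                preempt_enable();

                if (!(lock)->break_lock)
                        (lock)->break_lock = 1;
                while (!spin_can_lock(lock) && (lock)->break_lock)
                        _raw_spin_relax(&lock->raw_lock);
        }
        (lock)->break_lock = 0;
}
#endif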
#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
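/*
 * Illustrative sketch (not part of the original file): callers reach
 * this through the spin_lock_nested() wrapper when they legitimately
 * hold two locks of the same lockdep class, e.g. a parent and a child
 * object. SINGLE_DEPTH_NESTING is the conventional subclass for the
 * inner lock; "parent" and "child" are hypothetical.
 */
#if 0
spin_lock(&parent->lock);
spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
/* ... work on both objects ... */
spin_unlock(&child->lock);
spin_unlock(&parent->lock);
#endif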
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
                                _raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif
void __lockfunc _spin_unlock(spinlock_t *lock)
{
        __spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
        __write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
        __read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        __spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
        __spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
        __spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
        __read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
        __read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
        __write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
        __write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
        return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
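/*
 * Illustrative sketch (not part of the original file): the kind of
 * check an architecture's profile_pc() makes with in_lock_functions(),
 * skipping samples that land inside the __lockfunc text section and
 * reporting the caller instead. The regs field used for the return
 * address is hypothetical and architecture-specific.
 */
#if 0
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;      /* report the caller instead */
        return pc;
}
#endif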