spinlock.c

/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
void __lockfunc __##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *	__[spin|read|write]_lock()
 *	__[spin|read|write]_lock_irq()
 *	__[spin|read|write]_lock_irqsave()
 *	__[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif
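
/*
 * Illustrative sketch only (not compiled): hand-expanding
 * BUILD_LOCK_OPS(spin, spinlock) above gives, for the plain-lock
 * case, roughly:
 *
 *	void __lockfunc __spin_lock(spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				_raw_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 *
 * i.e. the trylock is attempted with preemption disabled, but while
 * waiting preemption stays enabled and break_lock asks the current
 * holder to release the lock as soon as possible.
 */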

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
			     _raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif
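
/*
 * Caller-side usage sketch (parent/child are hypothetical locks): the
 * _nested variants back spin_lock_nested(), which tells lockdep that
 * taking two locks of the same lock class in a fixed order is
 * intentional rather than a potential deadlock:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */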

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _read_trylock(rwlock_t *lock)
{
	return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _write_trylock(rwlock_t *lock)
{
	return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
#endif
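
/*
 * Caller-side usage sketch: the trylock variants return nonzero on
 * success and never spin, so callers must be prepared for failure
 * (do_work()/defer_work() are hypothetical helpers):
 *
 *	if (spin_trylock(&obj->lock)) {
 *		do_work(obj);
 *		spin_unlock(&obj->lock);
 *	} else {
 *		defer_work(obj);
 *	}
 */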

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _read_lock(rwlock_t *lock)
{
	__read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
#endif
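
/*
 * Caller-side usage sketch: the irqsave variant returns the saved
 * interrupt flags, which the caller hands back when unlocking:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&obj->lock, flags);
 *	...critical section, local interrupts disabled...
 *	spin_unlock_irqrestore(&obj->lock, flags);
 */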

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	__spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	__spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
#endif
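
/*
 * Caller-side usage sketch: the _bh variants disable softirq
 * processing, which suffices for data shared only with bottom-half
 * (softirq) context (stats is a hypothetical structure):
 *
 *	spin_lock_bh(&stats->lock);
 *	stats->packets++;
 *	spin_unlock_bh(&stats->lock);
 */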

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	__read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	__read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	__write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	__write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _spin_lock(spinlock_t *lock)
{
	__spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _write_lock(rwlock_t *lock)
{
	__write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _write_unlock(rwlock_t *lock)
{
	__write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _read_unlock(rwlock_t *lock)
{
	__read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	__spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	__spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	__spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	__read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	__read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	__write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	__write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
#endif

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
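
/*
 * Caller-side usage sketch: architectures use in_lock_functions() in
 * their profile_pc() to avoid attributing profiler hits to the lock
 * code itself; roughly (details differ per architecture, and
 * caller_pc_from_stack() is a hypothetical helper):
 *
 *	unsigned long pc = instruction_pointer(regs);
 *
 *	if (in_lock_functions(pc))
 *		pc = caller_pc_from_stack(regs);
 *	return pc;
 */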