spinlock.c

/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */
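
/*
 * Illustrative usage (not part of the original file): callers do not
 * invoke these entry points directly. They use the spin_lock()/
 * spin_unlock() wrappers from <linux/spinlock.h>, which resolve to the
 * _spin_lock()/_spin_unlock() functions below on SMP builds:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */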
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);
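
/*
 * Illustrative caller pattern (an assumption, not code from this file):
 * the trylock variants return nonzero only on success, so the caller
 * must provide a fallback for the contended case:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock busy: defer, retry, or take a slow path ...
 *	}
 */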

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
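
/*
 * Illustrative usage (an assumption, not code from this file): the
 * irqsave variant is safe even when the caller may already be running
 * with interrupts disabled, because the prior interrupt state is saved
 * into flags and restored on unlock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts off ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */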

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *	_[spin|read|write]_lock()
 *	_[spin|read|write]_lock_irq()
 *	_[spin|read|write]_lock_irqsave()
 *	_[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
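
/*
 * For reference, a hand-expanded sketch of what BUILD_LOCK_OPS(spin,
 * spinlock) generates for the plain lock operation (written out here
 * purely for illustration; the compiler derives this from the macro
 * above):
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				_raw_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */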

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
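
/*
 * Illustrative use of the nested variant (an assumption; parent/child
 * are hypothetical structures, not from this file): when two locks of
 * the same lockdep class must be held at once, annotating the inner
 * acquisition with a subclass keeps lockdep from reporting a false
 * self-deadlock:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */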

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

#endif

void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
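	/*
	 * Descriptive note (added): the resched check is skipped here
	 * because local_bh_enable_ip() below re-enables softirqs and
	 * performs its own preemption check; passing the caller's
	 * return address keeps the softirq-tracing annotation pointed
	 * at the real unlock site rather than this helper.
	 */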
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
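
/*
 * Illustrative caller (an assumption based on the header comment, not
 * code from this file): architectures consult this from profile_pc()
 * so that a profiling sample taken inside a __lockfunc body can be
 * attributed to the lock's caller instead of the lock helper itself:
 *
 *	if (in_lock_functions(pc))
 *		pc = ... unwind one frame to the caller's address ...
 */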