
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>
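
/*
 * The trylock variants below never spin: they disable preemption, make
 * a single acquisition attempt and back out (re-enabling preemption)
 * on failure.  A typical caller pattern, via the spin_trylock()
 * wrapper (the lock and counter names are purely illustrative):
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long dropped;
 *
 *	if (spin_trylock(&stats_lock)) {
 *		dropped++;
 *		spin_unlock(&stats_lock);
 *	} else {
 *		...	(contended - caller must cope without the lock)
 *	}
 */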
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
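
/*
 * _spin_lock_irq() differs from _spin_lock_irqsave() only in that it
 * disables interrupts unconditionally rather than saving their
 * previous state; it is therefore only suitable for callers that know
 * interrupts are enabled, since the matching _spin_unlock_irq() will
 * re-enable them unconditionally.
 */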
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
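
/*
 * The _bh variants disable softirq (bottom half) processing rather
 * than hard interrupts; this is sufficient when a lock is shared only
 * with softirq context, and cheaper than disabling interrupts.
 */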
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);
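
/*
 * The plain lock operations only disable preemption. They are the
 * cheapest variants, and are safe only for locks that are never taken
 * from hardirq or softirq context.
 */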
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
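/*
 * (The lock holder is expected to notice break_lock - e.g. via
 * spin_is_contended() or cond_resched_lock() - and drop the lock
 * briefly so that waiters and preemption can make progress.)
 */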
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
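
/*
 * For illustration, BUILD_LOCK_OPS(spin, spinlock) above expands to
 * the preemption-friendly definitions of:
 *
 *	void _spin_lock(spinlock_t *lock);
 *	unsigned long _spin_lock_irqsave(spinlock_t *lock);
 *	void _spin_lock_irq(spinlock_t *lock);
 *	void _spin_lock_bh(spinlock_t *lock);
 *
 * and likewise for the read and write rwlock operations.
 */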

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
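
/*
 * The _nested and _nest_lock variants exist for lockdep's benefit:
 * they let a caller legitimately hold two locks of the same lock class
 * at once (e.g. both ends of a transfer) by supplying an explicit
 * subclass or an enclosing lock, instead of triggering a false
 * positive deadlock report.
 */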
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
			     _raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif
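
/*
 * The unlock operations undo their lock counterparts in reverse order:
 * release the lockdep dependency, drop the raw lock, then restore
 * interrupt/softirq/preemption state.
 */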
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
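
/*
 * The _bh unlock variants use preempt_enable_no_resched() rather than
 * preempt_enable(): rescheduling is pointless while softirqs are still
 * disabled, and local_bh_enable_ip() performs its own preemption check
 * once it has re-enabled softirqs.  The caller's return address is
 * passed in so the softirq-enable is attributed to the caller rather
 * than to this helper.
 */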
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
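
/*
 * in_lock_functions() reports whether an address lies within the
 * __lockfunc text section.  Profiling code (e.g. profile_pc() on the
 * architectures mentioned at the top of this file) uses it to
 * attribute samples taken inside lock routines to their caller.
 */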
notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);