#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))

void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
						__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
						__acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
						__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
						__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
						__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
						__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
						__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
						__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
						__releases(lock);
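
/*
 * A minimal usage sketch, assuming the usual spin_lock_*() wrappers from
 * <linux/spinlock.h>, which map to the _spin_*() entry points declared
 * above on SMP. The names my_lock, my_count and my_increment below are
 * hypothetical, for illustration only:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static unsigned long my_count;
 *
 *	static void my_increment(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&my_lock, flags);
 *		my_count++;	(critical section, IRQs off, lock held)
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */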

/*
 * We inline the unlock functions in the nondebug case:
 */
#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
#define __always_inline__spin_unlock
#define __always_inline__read_unlock
#define __always_inline__write_unlock
#define __always_inline__spin_unlock_irq
#define __always_inline__read_unlock_irq
#define __always_inline__write_unlock_irq
#endif

#ifndef CONFIG_DEBUG_SPINLOCK
#ifndef CONFIG_GENERIC_LOCKBREAK

#ifdef __always_inline__spin_lock
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef __always_inline__read_lock
#define _read_lock(lock) __read_lock(lock)
#endif

#ifdef __always_inline__write_lock
#define _write_lock(lock) __write_lock(lock)
#endif

#ifdef __always_inline__spin_lock_bh
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef __always_inline__read_lock_bh
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif

#ifdef __always_inline__write_lock_bh
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif

#ifdef __always_inline__spin_lock_irq
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef __always_inline__read_lock_irq
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif

#ifdef __always_inline__write_lock_irq
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif

#ifdef __always_inline__spin_lock_irqsave
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef __always_inline__read_lock_irqsave
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif

#ifdef __always_inline__write_lock_irqsave
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif

#endif /* !CONFIG_GENERIC_LOCKBREAK */

#ifdef __always_inline__spin_trylock
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef __always_inline__read_trylock
#define _read_trylock(lock) __read_trylock(lock)
#endif

#ifdef __always_inline__write_trylock
#define _write_trylock(lock) __write_trylock(lock)
#endif

#ifdef __always_inline__spin_trylock_bh
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef __always_inline__read_unlock
#define _read_unlock(lock) __read_unlock(lock)
#endif

#ifdef __always_inline__write_unlock
#define _write_unlock(lock) __write_unlock(lock)
#endif

#ifdef __always_inline__spin_unlock_bh
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef __always_inline__read_unlock_bh
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif

#ifdef __always_inline__write_unlock_bh
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock_irq
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef __always_inline__read_unlock_irq
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif

#ifdef __always_inline__write_unlock_irq
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif

#ifdef __always_inline__spin_unlock_irqrestore
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__read_unlock_irqrestore
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__write_unlock_irqrestore
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif

#endif /* CONFIG_DEBUG_SPINLOCK */

static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
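
/*
 * A minimal trylock sketch: the __*_trylock() helpers above return 1 and
 * keep preemption disabled on success, or re-enable preemption and return
 * 0 on failure, so callers must branch on the result. my_lock is a
 * hypothetical name:
 *
 *	if (spin_trylock(&my_lock)) {
 *		(got the lock: do the work, then release it)
 *		spin_unlock(&my_lock);
 *	} else {
 *		(lock was busy: fall back without spinning)
 *	}
 */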

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}

static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}

static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}

static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
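
/*
 * A minimal _bh pairing sketch: the _bh variants above take the lock with
 * bottom halves disabled (local_bh_disable()) and re-enable them on unlock,
 * the usual pattern for data shared with softirq context. my_lock is a
 * hypothetical name:
 *
 *	spin_lock_bh(&my_lock);
 *	(touch data that a softirq handler also uses)
 *	spin_unlock_bh(&my_lock);
 */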

static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}

#endif /* __LINUX_SPINLOCK_API_SMP_H */