spinlock.c

/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004) Ingo Molnar
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

/*
 * Generic definition of the raw read_trylock() function;
 * architectures are supposed to optimize this:
 */
int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
{
	_raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic_raw_read_trylock);
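
/*
 * The trylock variants below disable preemption, make a single attempt
 * on the raw lock and, if that attempt fails, re-enable preemption and
 * report failure.  On success, preemption stays disabled for as long as
 * the lock is held.
 */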
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock))
		return 1;
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock))
		return 1;
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
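
/*
 * On non-preemptible kernels the contended case can simply spin inside
 * the raw lock primitive, so each helper in the section below only has
 * to disable preemption (plus interrupts or bottom halves where the
 * name says so) before acquiring the raw lock.
 */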
#ifndef CONFIG_PREEMPT

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
#define BUILD_LOCK_OPS(op, locktype)	\
void __lockfunc _##op##_lock(locktype##_t *lock)	\
{	\
	preempt_disable();	\
	for (;;) {	\
		if (likely(_raw_##op##_trylock(lock)))	\
			break;	\
		preempt_enable();	\
		if (!(lock)->break_lock)	\
			(lock)->break_lock = 1;	\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();	\
		preempt_disable();	\
	}	\
	(lock)->break_lock = 0;	\
}	\
	\
EXPORT_SYMBOL(_##op##_lock);	\
	\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{	\
	unsigned long flags;	\
	\
	preempt_disable();	\
	for (;;) {	\
		local_irq_save(flags);	\
		if (likely(_raw_##op##_trylock(lock)))	\
			break;	\
		local_irq_restore(flags);	\
	\
		preempt_enable();	\
		if (!(lock)->break_lock)	\
			(lock)->break_lock = 1;	\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();	\
		preempt_disable();	\
	}	\
	(lock)->break_lock = 0;	\
	return flags;	\
}	\
	\
EXPORT_SYMBOL(_##op##_lock_irqsave);	\
	\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)	\
{	\
	_##op##_lock_irqsave(lock);	\
}	\
	\
EXPORT_SYMBOL(_##op##_lock_irq);	\
	\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)	\
{	\
	unsigned long flags;	\
	\
	/*	\
	 * Careful: we must exclude softirqs too, hence the	\
	 * irq-disabling. We use the generic preemption-aware	\
	 * function:	\
	 */	\
	flags = _##op##_lock_irqsave(lock);	\
	local_bh_disable();	\
	local_irq_restore(flags);	\
}	\
	\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif /* CONFIG_PREEMPT */
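
/*
 * The unlock paths mirror the lock paths in reverse: drop the raw lock
 * first, then restore interrupts or bottom halves where applicable, and
 * finally re-enable preemption.
 */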
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
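
/*
 * The *_unlock_bh() variants use preempt_enable_no_resched() so that no
 * reschedule is attempted while bottom halves are still being
 * re-enabled; the final preemption check then happens as part of
 * local_bh_enable().
 */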
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
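
/*
 * _spin_trylock_bh() disables bottom halves and preemption up front;
 * on failure it undoes both, in the same order as the *_unlock_bh()
 * paths above, before reporting failure.
 */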
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
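
/*
 * in_lock_functions() reports whether an address falls within the
 * __lockfunc text section; callers (profiling or wchan-style backtrace
 * code, for instance) can use it to skip over lock functions when
 * deciding where a task is really spending its time.
 */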
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
		&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);