/* kernel/spinlock.c */
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	/*
	 * NOTE: this generic fallback does not actually "try" — it
	 * unconditionally acquires the read lock (spinning if needed)
	 * and then reports success. An architecture-optimized version
	 * would fail fast instead of blocking.
	 */
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
  26. int __lockfunc _spin_trylock(spinlock_t *lock)
  27. {
  28. preempt_disable();
  29. if (_raw_spin_trylock(lock))
  30. return 1;
  31. preempt_enable();
  32. return 0;
  33. }
  34. EXPORT_SYMBOL(_spin_trylock);
  35. int __lockfunc _read_trylock(rwlock_t *lock)
  36. {
  37. preempt_disable();
  38. if (_raw_read_trylock(lock))
  39. return 1;
  40. preempt_enable();
  41. return 0;
  42. }
  43. EXPORT_SYMBOL(_read_trylock);
  44. int __lockfunc _write_trylock(rwlock_t *lock)
  45. {
  46. preempt_disable();
  47. if (_raw_write_trylock(lock))
  48. return 1;
  49. preempt_enable();
  50. return 0;
  51. }
  52. EXPORT_SYMBOL(_write_trylock);
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
/*
 * Non-preemptible (or UP) variants: disable preemption first, then
 * spin on the raw lock directly.
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
/*
 * Take @lock with local interrupts disabled; returns the previous
 * interrupt state for a later _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	/* irqs off first, then preemption, then acquire. */
	local_irq_save(flags);
	preempt_disable();
	/*
	 * The saved flags are passed down to the raw lock op — presumably
	 * so an architecture can briefly restore the irq state while
	 * spinning; confirm against the arch implementation.
	 */
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
/* Take @lock with local interrupts disabled (no flags saved). */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	/* Order matters: irqs off, then preemption off, then acquire. */
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
/* Take @lock with softirq (bottom-half) processing disabled. */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
/*
 * Take @lock for reading with local interrupts disabled; returns the
 * previous interrupt state for _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	/* irqs off first, then preemption, then acquire. */
	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
/* Take @lock for reading with local interrupts disabled. */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
/* Take @lock for reading with softirq processing disabled. */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
/*
 * Take @lock for writing with local interrupts disabled; returns the
 * previous interrupt state for _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
/* Take @lock for writing with local interrupts disabled. */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
/* Take @lock for writing with softirq processing disabled. */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
/* Plain spin-lock acquire: preemption off, then spin on the raw lock. */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
/* Plain write-lock acquire: preemption off, then spin on the raw lock. */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 *
 * Each expansion re-enables preemption while waiting (so this CPU can
 * be preempted during the busy-wait) and sets ->break_lock to ask the
 * current holder to drop the lock as soon as it can.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		/* Contended: allow preemption while we busy-wait. */	\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		/* Contended: restore irqs/preemption while waiting. */	\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	/* NOTE(review): the saved flags are discarded — callers */	\
	/* presumably have irqs enabled on entry; confirm. */		\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif /* CONFIG_PREEMPT */
/* Release @lock, then re-enable preemption (reverse of _spin_lock). */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
/* Release the write lock, then re-enable preemption. */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
/* Release the read lock, then re-enable preemption. */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
/* Release @lock and restore the interrupt state saved in @flags. */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	/* Unwind in reverse of the lock path: lock, irqs, preemption. */
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
/* Release @lock and unconditionally re-enable local interrupts. */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
/* Release @lock and re-enable softirq processing. */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	/*
	 * NOTE(review): preemption is re-enabled without a resched check —
	 * presumably local_bh_enable() performs that check itself; confirm
	 * against the preempt/softirq implementation.
	 */
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
/* Release the read lock and restore the irq state saved in @flags. */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
/* Release the read lock and unconditionally re-enable interrupts. */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
/* Release the read lock and re-enable softirq processing. */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	/* See _spin_unlock_bh: no-resched variant before local_bh_enable(). */
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
/* Release the write lock and restore the irq state saved in @flags. */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
/* Release the write lock and unconditionally re-enable interrupts. */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
/* Release the write lock and re-enable softirq processing. */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	/* See _spin_unlock_bh: no-resched variant before local_bh_enable(). */
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
  307. int __lockfunc _spin_trylock_bh(spinlock_t *lock)
  308. {
  309. local_bh_disable();
  310. preempt_disable();
  311. if (_raw_spin_trylock(lock))
  312. return 1;
  313. preempt_enable_no_resched();
  314. local_bh_enable();
  315. return 0;
  316. }
  317. EXPORT_SYMBOL(_spin_trylock_bh);
  318. int in_lock_functions(unsigned long addr)
  319. {
  320. /* Linker adds these: start and end of __lockfunc functions */
  321. extern char __lock_text_start[], __lock_text_end[];
  322. return addr >= (unsigned long)__lock_text_start
  323. && addr < (unsigned long)__lock_text_end;
  324. }
  325. EXPORT_SYMBOL(in_lock_functions);