#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/system.h>

/*
 * Must define these before including other files; the inline functions
 * in those files need them.
 */
#define LOCK_SECTION_NAME \
	".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra) \
	".subsection 1\n\t" \
	extra \
	".ifndef " LOCK_SECTION_NAME "\n\t" \
	LOCK_SECTION_NAME ":\n\t" \
	".endif\n"

#define LOCK_SECTION_END \
	".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
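/*
 * Illustrative sketch (not part of this header): the LOCK_SECTION_*
 * macros above bracket the out-of-line slow path of an inline-asm
 * locking primitive, so the contended path is moved out of the hot
 * text.  Loosely modelled on the i386 semaphore down() of this era;
 * LOCK is the arch's SMP lock-prefix macro, and the helper name is
 * only an example:
 *
 *	__asm__ __volatile__(
 *		LOCK "decl %0\n\t"		// --sem->count
 *		"js 2f\n"			// went negative: contended
 *		"1:\n"				// fast path falls through
 *		LOCK_SECTION_START("")
 *		"2:\tcall __down_failed\n\t"	// slow path, out of line
 *		"jmp 1b\n"
 *		LOCK_SECTION_END
 *		: "=m" (sem->count) : : "memory");
 */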
/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
#include <asm/spinlock.h>

int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)	__acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)	__releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)	__acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)	__releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
int in_lock_functions(unsigned long addr);
#else

#define in_lock_functions(ADDR) 0

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic, lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif
#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC	0x1D244B3C

typedef struct {
	unsigned long magic;
	volatile unsigned long lock;
	volatile unsigned int babble;
	const char *module;
	char *owner;
	int oline;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__, NULL, 0 }

#define spin_lock_init(x) \
	do { \
		(x)->magic = SPINLOCK_MAGIC; \
		(x)->lock = 0; \
		(x)->babble = 5; \
		(x)->module = __FILE__; \
		(x)->owner = NULL; \
		(x)->oline = 0; \
	} while (0)

#define CHECK_LOCK(x) \
	do { \
		if ((x)->magic != SPINLOCK_MAGIC) { \
			printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
				__FILE__, __LINE__, (x)); \
		} \
	} while (0)

#define _raw_spin_lock(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
				__FILE__, __LINE__, (x)->module, \
				(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
	} while (0)

/* Without debugging, spin_is_locked() on UP always returns FALSE,
 * so printk if the lock is already held. */
#define spin_is_locked(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
				__FILE__, __LINE__, (x)->module, \
				(x), (x)->owner, (x)->oline); \
		} \
		0; \
	})

/* With debugging, assert_spin_locked() on UP really does check
 * the lock value. */
#define assert_spin_locked(x) \
	({ \
		CHECK_LOCK(x); \
		BUG_ON(!(x)->lock); \
	})

/* Without debugging, spin_trylock() on UP always returns TRUE,
 * so printk if the lock is already held. */
#define _raw_spin_trylock(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
				__FILE__, __LINE__, (x)->module, \
				(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
		1; \
	})

#define spin_unlock_wait(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
				__FILE__, __LINE__, (x)->module, (x), \
				(x)->owner, (x)->oline); \
		} \
	} while (0)

#define _raw_spin_unlock(x) \
	do { \
		CHECK_LOCK(x); \
		if (!(x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
				__FILE__, __LINE__, (x)->module, (x)); \
		} \
		(x)->lock = 0; \
	} while (0)
#else

/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)	do { (void)(lock); } while (0)
#define _raw_spin_lock(lock)	do { (void)(lock); } while (0)
#define spin_is_locked(lock)	((void)(lock), 0)
#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
#define _raw_spin_trylock(lock)	(((void)(lock), 1))
#define spin_unlock_wait(lock)	do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)	do { (void)(lock); } while (0)

#endif /* CONFIG_DEBUG_SPINLOCK */
/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)	do { (void)(lock); } while (0)
#define _raw_read_lock(lock)	do { (void)(lock); } while (0)
#define _raw_read_unlock(lock)	do { (void)(lock); } while (0)
#define _raw_write_lock(lock)	do { (void)(lock); } while (0)
#define _raw_write_unlock(lock)	do { (void)(lock); } while (0)
#define read_can_lock(lock)	(((void)(lock), 1))
#define write_can_lock(lock)	(((void)(lock), 1))
#define _raw_read_trylock(lock)	({ (void)(lock); (1); })
#define _raw_write_trylock(lock)	({ (void)(lock); (1); })
#define _spin_trylock(lock)	({ preempt_disable(); _raw_spin_trylock(lock) ? \
				1 : ({ preempt_enable(); 0; }); })

#define _read_trylock(lock)	({ preempt_disable(); _raw_read_trylock(lock) ? \
				1 : ({ preempt_enable(); 0; }); })

#define _write_trylock(lock)	({ preempt_disable(); _raw_write_trylock(lock) ? \
				1 : ({ preempt_enable(); 0; }); })

#define _spin_trylock_bh(lock)	({ preempt_disable(); local_bh_disable(); \
				_raw_spin_trylock(lock) ? \
				1 : ({ preempt_enable_no_resched(); local_bh_enable(); 0; }); })
#define _spin_lock(lock) \
	do { \
		preempt_disable(); \
		_raw_spin_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _write_lock(lock) \
	do { \
		preempt_disable(); \
		_raw_write_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _read_lock(lock) \
	do { \
		preempt_disable(); \
		_raw_read_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _spin_unlock(lock) \
	do { \
		_raw_spin_unlock(lock); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _write_unlock(lock) \
	do { \
		_raw_write_unlock(lock); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _read_unlock(lock) \
	do { \
		_raw_read_unlock(lock); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _spin_lock_irqsave(lock, flags) \
	do { \
		local_irq_save(flags); \
		preempt_disable(); \
		_raw_spin_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _spin_lock_irq(lock) \
	do { \
		local_irq_disable(); \
		preempt_disable(); \
		_raw_spin_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _spin_lock_bh(lock) \
	do { \
		local_bh_disable(); \
		preempt_disable(); \
		_raw_spin_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _read_lock_irqsave(lock, flags) \
	do { \
		local_irq_save(flags); \
		preempt_disable(); \
		_raw_read_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _read_lock_irq(lock) \
	do { \
		local_irq_disable(); \
		preempt_disable(); \
		_raw_read_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _read_lock_bh(lock) \
	do { \
		local_bh_disable(); \
		preempt_disable(); \
		_raw_read_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _write_lock_irqsave(lock, flags) \
	do { \
		local_irq_save(flags); \
		preempt_disable(); \
		_raw_write_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _write_lock_irq(lock) \
	do { \
		local_irq_disable(); \
		preempt_disable(); \
		_raw_write_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _write_lock_bh(lock) \
	do { \
		local_bh_disable(); \
		preempt_disable(); \
		_raw_write_lock(lock); \
		__acquire(lock); \
	} while (0)

#define _spin_unlock_irqrestore(lock, flags) \
	do { \
		_raw_spin_unlock(lock); \
		local_irq_restore(flags); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _spin_unlock_irq(lock) \
	do { \
		_raw_spin_unlock(lock); \
		local_irq_enable(); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _spin_unlock_bh(lock) \
	do { \
		_raw_spin_unlock(lock); \
		preempt_enable_no_resched(); \
		local_bh_enable(); \
		__release(lock); \
	} while (0)

#define _write_unlock_bh(lock) \
	do { \
		_raw_write_unlock(lock); \
		preempt_enable_no_resched(); \
		local_bh_enable(); \
		__release(lock); \
	} while (0)

#define _read_unlock_irqrestore(lock, flags) \
	do { \
		_raw_read_unlock(lock); \
		local_irq_restore(flags); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _write_unlock_irqrestore(lock, flags) \
	do { \
		_raw_write_unlock(lock); \
		local_irq_restore(flags); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _read_unlock_irq(lock) \
	do { \
		_raw_read_unlock(lock); \
		local_irq_enable(); \
		preempt_enable(); \
		__release(lock); \
	} while (0)

#define _read_unlock_bh(lock) \
	do { \
		_raw_read_unlock(lock); \
		preempt_enable_no_resched(); \
		local_bh_enable(); \
		__release(lock); \
	} while (0)

#define _write_unlock_irq(lock) \
	do { \
		_raw_write_unlock(lock); \
		local_irq_enable(); \
		preempt_enable(); \
		__release(lock); \
	} while (0)
#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set; the various
 * methods are defined as nops where they are not required.
 */
#define spin_trylock(lock)	__cond_lock(_spin_trylock(lock))
#define read_trylock(lock)	__cond_lock(_read_trylock(lock))
#define write_trylock(lock)	__cond_lock(_write_trylock(lock))

#define spin_lock(lock)		_spin_lock(lock)
#define write_lock(lock)	_write_lock(lock)
#define read_lock(lock)		_read_lock(lock)

#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
#endif

#define spin_lock_irq(lock)	_spin_lock_irq(lock)
#define spin_lock_bh(lock)	_spin_lock_bh(lock)
#define read_lock_irq(lock)	_read_lock_irq(lock)
#define read_lock_bh(lock)	_read_lock_bh(lock)
#define write_lock_irq(lock)	_write_lock_irq(lock)
#define write_lock_bh(lock)	_write_lock_bh(lock)

#define spin_unlock(lock)	_spin_unlock(lock)
#define write_unlock(lock)	_write_unlock(lock)
#define read_unlock(lock)	_read_unlock(lock)

#define spin_unlock_irqrestore(lock, flags)	_spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)	_spin_unlock_irq(lock)
#define spin_unlock_bh(lock)	_spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)	_read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)	_read_unlock_irq(lock)
#define read_unlock_bh(lock)	_read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)	_write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)	_write_unlock_irq(lock)
#define write_unlock_bh(lock)	_write_unlock_bh(lock)
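/*
 * Typical usage of the save/restore variants (illustrative only;
 * 'mydev' is a made-up example structure).  The flags word preserves
 * the caller's IRQ state, so this nests safely inside regions that
 * already have interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&mydev->lock, flags);
 *	mydev->count++;			// critical section
 *	spin_unlock_irqrestore(&mydev->lock, flags);
 */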
#define spin_trylock_bh(lock)	__cond_lock(_spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
	({ \
		local_irq_disable(); \
		_spin_trylock(lock) ? \
		1 : ({ local_irq_enable(); 0; }); \
	})

#define spin_trylock_irqsave(lock, flags) \
	({ \
		local_irq_save(flags); \
		_spin_trylock(lock) ? \
		1 : ({ local_irq_restore(flags); 0; }); \
	})
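/*
 * Illustrative trylock pattern (names are made up): acquire
 * opportunistically and fall back rather than spinning.  On success
 * the caller owns the lock (with preemption disabled as usual) and
 * must unlock normally:
 *
 *	if (spin_trylock(&mydev->lock)) {
 *		mydev->fast_path_hits++;
 *		spin_unlock(&mydev->lock);
 *	} else {
 *		defer_work(mydev);	// hypothetical fallback
 *	}
 */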
#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock(spinlock_t *lock);
extern void _metered_spin_unlock(spinlock_t *lock);
extern int  _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock(rwlock_t *lock);
extern void _metered_read_unlock(rwlock_t *lock);
extern void _metered_write_lock(rwlock_t *lock);
extern void _metered_write_unlock(rwlock_t *lock);
extern int  _metered_read_trylock(rwlock_t *lock);
extern int  _metered_write_trylock(rwlock_t *lock);
#endif

/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#endif

#define atomic_dec_and_lock(atomic, lock) __cond_lock(_atomic_dec_and_lock(atomic, lock))
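/*
 * The classic use of atomic_dec_and_lock() is dropping a reference
 * count where the final put must also take a lock (e.g. to unlink the
 * object), without paying a lock/unlock round-trip on every non-final
 * put.  Sketch with made-up names:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		// count hit zero; lock is now held
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */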
/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop.  If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (test_and_set_bit(bitnum, addr)) {
		while (test_bit(bitnum, addr)) {
			preempt_enable();
			cpu_relax();
			preempt_disable();
		}
	}
#endif
	__acquire(bitlock);
}
/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (test_and_set_bit(bitnum, addr)) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}
/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	BUG_ON(!test_bit(bitnum, addr));
	smp_mb__before_clear_bit();
	clear_bit(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
	return preempt_count();
#else
	return 1;
#endif
}
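/*
 * Sketch of bit-spinlock usage (illustrative; the flag word and bit
 * number are made up).  One bit of an existing word serves as the
 * lock, which is why e.g. the buffer-head and journaling code use this
 * where a full spinlock_t per object would be too large:
 *
 *	#define MY_STATE_LOCK 0		// bit 0 of 'state' is the lock
 *	static unsigned long state;
 *
 *	bit_spin_lock(MY_STATE_LOCK, &state);
 *	// ... modify data guarded by the lock bit ...
 *	bit_spin_unlock(MY_STATE_LOCK, &state);
 */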
#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
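/*
 * Static definition example (the lock name is illustrative); this
 * expands to a spinlock_t initialized to SPIN_LOCK_UNLOCKED:
 *
 *	static DEFINE_SPINLOCK(mydev_lock);
 */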
/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock)	(!spin_is_locked(lock))

#endif /* __LINUX_SPINLOCK_H */