semaphore-helper.h

#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
        atomic_inc((atomic_t *)&sem->sleepers);
}
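
/*
 * Illustrative sketch (not part of the original header): wake_one_more()
 * is the waker half of the pair.  Assuming the 2.4-era arch/sh convention
 * where struct semaphore carries a wait queue named `wait`, the up()
 * slow path pairs it with a wake-up roughly like this:
 *
 *      void __up(struct semaphore *sem)
 *      {
 *              wake_one_more(sem);
 *              wake_up(&sem->wait);
 *      }
 */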

static __inline__ int waking_non_zero(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers > 0) {
                sem->sleepers--;
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
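
/*
 * Illustrative sketch (not part of this header): waking_non_zero() is the
 * sleeper half.  Assuming the usual 2.4-era __down() shape, where the task
 * is already queued on sem->wait and marks itself TASK_UNINTERRUPTIBLE on
 * each pass, the sleep loop consumes a wakeup roughly like this:
 *
 *      for (;;) {
 *              if (waking_non_zero(sem))
 *                      break;
 *              schedule();
 *      }
 *
 * A return of 1 means this task claimed one of the wakeups counted in
 * sem->sleepers and now owns the semaphore; 0 means go back to sleep.
 */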

/*
 * waking_non_zero_interruptible:
 *      1       got the lock
 *      0       go to sleep
 *      -EINTR  interrupted
 *
 * We must undo the down_interruptible() decrement of sem->count while we
 * are protected by the spinlock, so that this atomic_inc() is atomic with
 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
                                                    struct task_struct *tsk)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers > 0) {
                sem->sleepers--;
                ret = 1;
        } else if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
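
/*
 * Illustrative sketch (not part of this header): __down_interruptible()
 * folds the tri-state result above into its own return value.  Assuming
 * tsk is the current task sleeping on sem->wait, the loop body is roughly:
 *
 *      ret = waking_non_zero_interruptible(sem, tsk);
 *      if (ret) {
 *              if (ret == 1)
 *                      ret = 0;
 *              break;
 *      }
 *      schedule();
 *
 * so 1 (got the lock) becomes a successful 0 return, -EINTR is passed
 * through to the caller, and 0 keeps the task in the sleep loop.
 */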

/*
 * waking_non_zero_trylock:
 *      1       failed to lock
 *      0       got the lock
 *
 * We must undo the down_trylock() decrement of sem->count while we are
 * protected by the spinlock, so that this atomic_inc() is atomic with
 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 1;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers <= 0)
                atomic_inc(&sem->count);
        else {
                sem->sleepers--;
                ret = 0;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
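
/*
 * Illustrative sketch (not part of this header): because 0 already means
 * "got the lock" and 1 "failed", the down_trylock() slow path can return
 * this helper's result directly:
 *
 *      int __down_trylock(struct semaphore *sem)
 *      {
 *              return waking_non_zero_trylock(sem);
 *      }
 */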

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */