#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
#define __ASM_SH64_SEMAPHORE_HELPER_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/semaphore-helper.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 */

#include <asm/errno.h>

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore *sem)
{
	/* Credit one pending wakeup; sem->sleepers is updated atomically. */
	atomic_inc((atomic_t *)&sem->sleepers);
}

/*
 * waking_non_zero:
 *	1	got the lock (consumed a pending wakeup)
 *	0	go to sleep
 */
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
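
/*
 * Illustrative sketch, not part of this header: the arch's semaphore.c
 * typically pairs the two helpers above roughly as below.  The __up()
 * and __down() bodies here are an assumption modelled on the generic
 * 2.4-era implementation, not code from this archive.
 *
 *	void __up(struct semaphore *sem)
 *	{
 *		wake_one_more(sem);		// credit one pending wakeup
 *		wake_up(&sem->wait);		// kick a sleeper on the wait queue
 *	}
 *
 *	void __down(struct semaphore *sem)
 *	{
 *		struct task_struct *tsk = current;
 *		DECLARE_WAITQUEUE(wait, tsk);
 *
 *		add_wait_queue(&sem->wait, &wait);
 *		for (;;) {
 *			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 *			if (waking_non_zero(sem))	// 1: wakeup consumed, lock is ours
 *				break;
 *			schedule();			// 0: go (back) to sleep
 *		}
 *		set_task_state(tsk, TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *	}
 */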
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * The sem->count increment that undoes down_interruptible()'s decrement
 * must happen while we hold the spinlock, so that it is atomic with
 * respect to the sleepers update in wake_one_more(); otherwise we can
 * race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						    struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Interrupted by a signal: give back the count we took. */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
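
/*
 * Illustrative sketch, not part of this header: the three return values
 * above map onto a sleep loop roughly like this.  The body is an
 * assumption modelled on the generic 2.4-era __down_interruptible(),
 * not code from this archive.
 *
 *	int __down_interruptible(struct semaphore *sem)
 *	{
 *		struct task_struct *tsk = current;
 *		DECLARE_WAITQUEUE(wait, tsk);
 *		int ret = 0;
 *
 *		add_wait_queue(&sem->wait, &wait);
 *		for (;;) {
 *			int waking;
 *
 *			set_task_state(tsk, TASK_INTERRUPTIBLE);
 *			waking = waking_non_zero_interruptible(sem, tsk);
 *			if (waking == 1)		// got the lock
 *				break;
 *			if (waking == -EINTR) {		// interrupted by a signal
 *				ret = -EINTR;
 *				break;
 *			}
 *			schedule();			// 0: go to sleep
 *		}
 *		set_task_state(tsk, TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *		return ret;
 *	}
 */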
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * The sem->count increment that undoes down_trylock()'s decrement must
 * happen while we hold the spinlock, so that it is atomic with respect
 * to the sleepers update in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		/* No wakeup pending: give back the count down_trylock() took. */
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
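
/*
 * Illustrative sketch, not part of this header: since trylock never
 * sleeps, the helper's result can be handed straight back.  This
 * __down_trylock() is an assumption modelled on the generic 2.4-era
 * implementation.
 *
 *	int __down_trylock(struct semaphore *sem)
 *	{
 *		return waking_non_zero_trylock(sem);	// 0 == acquired, 1 == busy
 *	}
 */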
#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */