semaphore-helper.h

#ifndef _M68K_SEMAPHORE_HELPER_H
#define _M68K_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * m68k version by Andreas Schwab
 */

#include <linux/errno.h>

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore *sem)
{
        atomic_inc(&sem->waking);
}
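/*
 * Illustration (not part of the original header): a sketch of how the
 * wake-up side is expected to pair with these helpers.  The function
 * name and the use of sem->wait are assumptions about the surrounding
 * m68k semaphore code, not taken from this file.  up() credits exactly
 * one wakeup with wake_one_more() before waking a sleeper; the sleeper
 * consumes that credit with waking_non_zero().  If the two did not
 * execute atomically wrt each other, a credit could be lost or consumed
 * twice between the increment and the decrement.
 */
#if 0   /* example only */
static inline void up_slow_path_sketch(struct semaphore *sem)
{
        wake_one_more(sem);     /* credit exactly one pending wakeup */
        wake_up(&sem->wait);    /* then let a sleeper race to consume it */
}
#endif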
#ifndef CONFIG_RMW_INSNS
extern spinlock_t semaphore_wake_lock;
#endif
static inline int waking_non_zero(struct semaphore *sem)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 0;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /* Decrement sem->waking iff it is positive, atomically: casl
           publishes the decrement only if sem->waking still holds the
           value the decrement was computed from, and retries otherwise. */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"
           "    jle     2f\n"
           "    subql   #1,%2\n"
           "    casl    %1,%2,%3\n"
           "    jne     1b\n"
           "    moveq   #1,%0\n"
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "0" (0), "1" (sem->waking));
#endif
        return ret;
}
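/*
 * Illustration (assumption, not in the original header): the casl loop
 * above is a compare-and-swap retry loop.  In portable C it amounts to
 * "decrement sem->waking if and only if it is positive", sketched here
 * with a hypothetical atomic_cmpxchg() primitive; the comments map each
 * step back to the corresponding instruction.
 */
#if 0   /* example only */
static inline int waking_non_zero_sketch(struct semaphore *sem)
{
        int old, new;

        do {
                old = atomic_read(&sem->waking);        /* movel %1,%2 */
                if (old <= 0)
                        return 0;       /* jle 2f: no wakeup to consume */
                new = old - 1;          /* subql #1,%2 */
        } while (atomic_cmpxchg(&sem->waking, old, new) != old);
                                        /* casl %1,%2,%3 + jne 1b */
        return 1;                       /* moveq #1,%0: got the wakeup */
}
#endif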
/*
 * waking_non_zero_interruptible:
 *      1       got the lock
 *      0       go to sleep
 *      -EINTR  interrupted
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 0;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 1;
        } else if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /* Same decrement-if-positive loop as above; on success it jumps
           straight to the local label "next" (passed in as operand %a4),
           so the signal check below is skipped. */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"
           "    jle     2f\n"
           "    subql   #1,%2\n"
           "    casl    %1,%2,%3\n"
           "    jne     1b\n"
           "    moveq   #1,%0\n"
           "    jra     %a4\n"
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
        if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
next:
#endif
        return ret;
}
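/*
 * Illustration (not part of the original header): how a sleeper is
 * expected to consume the three return values.  __down_interruptible()
 * in the m68k semaphore code follows roughly this shape; the task-state
 * bookkeeping around schedule() is deliberately elided here.
 */
#if 0   /* example only */
static int down_interruptible_sketch(struct semaphore *sem)
{
        int ret;

        for (;;) {
                ret = waking_non_zero_interruptible(sem, current);
                if (ret == 1)
                        return 0;       /* got the lock */
                if (ret == -EINTR)
                        return -EINTR;  /* interrupted by a signal */
                schedule();             /* ret == 0: go (back) to sleep */
        }
}
#endif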
/*
 * waking_non_zero_trylock:
 *      1       failed to lock
 *      0       got the lock
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 1;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 0;
        } else
                atomic_inc(&sem->count);
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /* Note the inverted result: ret is preloaded with 1 (failure)
           and the moveq clears it only if a wakeup was consumed. */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"
           "    jle     2f\n"
           "    subql   #1,%2\n"
           "    casl    %1,%2,%3\n"
           "    jne     1b\n"
           "    moveq   #0,%0\n"
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "0" (1), "1" (sem->waking));
        if (ret)
                atomic_inc(&sem->count);
#endif
        return ret;
}
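/*
 * Illustration (not part of the original header): a down_trylock()-style
 * caller has already decremented sem->count before landing here, which
 * is why the failure path above re-increments it.  A sketch, assuming an
 * atomic_dec_return() primitive for the fast path (the real fast path is
 * architecture asm and may differ):
 */
#if 0   /* example only */
static int down_trylock_sketch(struct semaphore *sem)
{
        if (atomic_dec_return(&sem->count) >= 0)
                return 0;                       /* uncontended: acquired */
        /* contended: try to consume a pending wakeup instead */
        return waking_non_zero_trylock(sem);    /* 0 = acquired, 1 = busy */
}
#endif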
#endif /* _M68K_SEMAPHORE_HELPER_H */