/*
 * include/asm-m68k/semaphore-helper.h
 */
  1. #ifndef _M68K_SEMAPHORE_HELPER_H
  2. #define _M68K_SEMAPHORE_HELPER_H
  3. /*
  4. * SMP- and interrupt-safe semaphores helper functions.
  5. *
  6. * (C) Copyright 1996 Linus Torvalds
  7. *
  8. * m68k version by Andreas Schwab
  9. */
  10. #include <linux/config.h>
  11. #include <linux/errno.h>
  12. /*
  13. * These two _must_ execute atomically wrt each other.
  14. */
/*
 * Post one "waking" credit on @sem so that a sleeper can claim it
 * through one of the waking_non_zero*() helpers below.  Must execute
 * atomically wrt those helpers (see the comment above); atomic_inc()
 * provides that on its own, so no extra locking is needed here.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}
  19. #ifndef CONFIG_RMW_INSNS
  20. extern spinlock_t semaphore_wake_lock;
  21. #endif
/*
 * Try to consume one "waking" credit posted by wake_one_more().
 *
 * Returns:
 *	1	a credit was consumed -- the caller owns the semaphore
 *	0	no credit available -- the caller should sleep (again)
 */
static inline int waking_non_zero(struct semaphore *sem)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	/*
	 * No cas instruction available: make the test-and-decrement
	 * atomic by serializing against all other waking_non_zero*()
	 * callers (and interrupts) with a global spinlock.
	 */
	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/*
	 * Lock-free version using the 68020+ cas instruction.
	 * %0 = ret (preset to 0), %1 = tmp1 (preset to sem->waking),
	 * %2 = tmp2, %3 = sem->waking in memory.
	 *
	 * 1:	copy the cached count into tmp2 (movel sets the CC),
	 *	bail out to 2: if it is <= 0 (ret stays 0),
	 *	decrement the copy,
	 *	casl: if memory still equals tmp1, store tmp2; otherwise
	 *	reload tmp1 from memory and retry from 1:,
	 *	on success set ret = 1.
	 */
	__asm__ __volatile__
	  ("1: movel %1,%2\n"
	   "   jle 2f\n"
	   "   subql #1,%2\n"
	   "   casl %1,%2,%3\n"
	   "   jne 1b\n"
	   "   moveq #1,%0\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "0" (0), "1" (sem->waking));
#endif
	return ret;
}
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * Like waking_non_zero(), but if no credit is available and a signal
 * is pending on @tsk, give back the count we took in __down() and
 * report -EINTR instead of sleeping.
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	/* Serialized variant -- see waking_non_zero() for the locking
	   rationale. */
	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Undo the decrement done by the down() fast path, then
		   tell the caller to abort with -EINTR. */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/*
	 * Same casl loop as waking_non_zero(), plus a computed goto:
	 * %4 is the address of the "next" label ("i" (&&next), a GCC
	 * label-as-value).  On success (ret = 1) "jra %a4" jumps past
	 * the signal check below; on failure we fall through to 2: with
	 * ret still 0 and test for a pending signal in C.
	 * NOTE(review): this predates real asm-goto support, so the
	 * compiler is not told about the jump -- the code relies on the
	 * historic m68k-gcc behavior here.
	 */
	__asm__ __volatile__
	  ("1: movel %1,%2\n"
	   "   jle 2f\n"
	   "   subql #1,%2\n"
	   "   casl %1,%2,%3\n"
	   "   jne 1b\n"
	   "   moveq #1,%0\n"
	   "   jra %a4\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
	if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
next:
#endif
	return ret;
}
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * Note the inverted return convention compared with the helpers above
 * (it mirrors down_trylock()).  On failure the count decremented by the
 * trylock fast path is given back, since we will not sleep.
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	/* Serialized variant -- see waking_non_zero() for the locking
	   rationale. */
	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 1;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 0;
	} else
		atomic_inc(&sem->count);
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/*
	 * Same casl loop as waking_non_zero(), but ret is preset to 1
	 * ("0" (1)) and cleared to 0 only when a credit was claimed.
	 * The count give-back on failure is done in C afterwards.
	 */
	__asm__ __volatile__
	  ("1: movel %1,%2\n"
	   "   jle 2f\n"
	   "   subql #1,%2\n"
	   "   casl %1,%2,%3\n"
	   "   jne 1b\n"
	   "   moveq #0,%0\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "0" (1), "1" (sem->waking));
	if (ret)
		atomic_inc(&sem->count);
#endif
	return ret;
}
  127. #endif