/* semaphore.h (3.4 KB) */
  1. #ifndef _ASM_M32R_SEMAPHORE_H
  2. #define _ASM_M32R_SEMAPHORE_H
  3. #include <linux/linkage.h>
  4. #ifdef __KERNEL__
  5. /*
  6. * SMP- and interrupt-safe semaphores..
  7. *
  8. * Copyright (C) 1996 Linus Torvalds
  9. * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
  10. */
  11. #include <linux/wait.h>
  12. #include <linux/rwsem.h>
  13. #include <asm/assembler.h>
  14. #include <asm/system.h>
  15. #include <asm/atomic.h>
  16. struct semaphore {
  17. atomic_t count;
  18. int sleepers;
  19. wait_queue_head_t wait;
  20. };
  21. #define __SEMAPHORE_INITIALIZER(name, n) \
  22. { \
  23. .count = ATOMIC_INIT(n), \
  24. .sleepers = 0, \
  25. .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
  26. }
  27. #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
  28. struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
  29. #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
  30. static inline void sema_init (struct semaphore *sem, int val)
  31. {
  32. /*
  33. * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
  34. *
  35. * i'd rather use the more flexible initialization above, but sadly
  36. * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
  37. */
  38. atomic_set(&sem->count, val);
  39. sem->sleepers = 0;
  40. init_waitqueue_head(&sem->wait);
  41. }
  42. static inline void init_MUTEX (struct semaphore *sem)
  43. {
  44. sema_init(sem, 1);
  45. }
  46. static inline void init_MUTEX_LOCKED (struct semaphore *sem)
  47. {
  48. sema_init(sem, 0);
  49. }
  50. asmlinkage void __down_failed(void /* special register calling convention */);
  51. asmlinkage int __down_failed_interruptible(void /* params in registers */);
  52. asmlinkage int __down_failed_trylock(void /* params in registers */);
  53. asmlinkage void __up_wakeup(void /* special register calling convention */);
  54. asmlinkage void __down(struct semaphore * sem);
  55. asmlinkage int __down_interruptible(struct semaphore * sem);
  56. asmlinkage int __down_trylock(struct semaphore * sem);
  57. asmlinkage void __up(struct semaphore * sem);
  58. /*
  59. * Atomically decrement the semaphore's count. If it goes negative,
  60. * block the calling thread in the TASK_UNINTERRUPTIBLE state.
  61. */
  62. static inline void down(struct semaphore * sem)
  63. {
  64. might_sleep();
  65. if (unlikely(atomic_dec_return(&sem->count) < 0))
  66. __down(sem);
  67. }
  68. /*
  69. * Interruptible try to acquire a semaphore. If we obtained
  70. * it, return zero. If we were interrupted, returns -EINTR
  71. */
  72. static inline int down_interruptible(struct semaphore * sem)
  73. {
  74. int result = 0;
  75. might_sleep();
  76. if (unlikely(atomic_dec_return(&sem->count) < 0))
  77. result = __down_interruptible(sem);
  78. return result;
  79. }
  80. /*
  81. * Non-blockingly attempt to down() a semaphore.
  82. * Returns zero if we acquired it
  83. */
  84. static inline int down_trylock(struct semaphore * sem)
  85. {
  86. unsigned long flags;
  87. long count;
  88. int result = 0;
  89. local_irq_save(flags);
  90. __asm__ __volatile__ (
  91. "# down_trylock \n\t"
  92. DCACHE_CLEAR("%0", "r4", "%1")
  93. M32R_LOCK" %0, @%1; \n\t"
  94. "addi %0, #-1; \n\t"
  95. M32R_UNLOCK" %0, @%1; \n\t"
  96. : "=&r" (count)
  97. : "r" (&sem->count)
  98. : "memory"
  99. #ifdef CONFIG_CHIP_M32700_TS1
  100. , "r4"
  101. #endif /* CONFIG_CHIP_M32700_TS1 */
  102. );
  103. local_irq_restore(flags);
  104. if (unlikely(count < 0))
  105. result = __down_trylock(sem);
  106. return result;
  107. }
  108. /*
  109. * Note! This is subtle. We jump to wake people up only if
  110. * the semaphore was negative (== somebody was waiting on it).
  111. * The default case (no contention) will result in NO
  112. * jumps for both down() and up().
  113. */
  114. static inline void up(struct semaphore * sem)
  115. {
  116. if (unlikely(atomic_inc_return(&sem->count) <= 0))
  117. __up(sem);
  118. }
  119. #endif /* __KERNEL__ */
  120. #endif /* _ASM_M32R_SEMAPHORE_H */