semaphore.h 3.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145
  1. #ifndef _ASM_M32R_SEMAPHORE_H
  2. #define _ASM_M32R_SEMAPHORE_H
  3. #include <linux/linkage.h>
  4. #ifdef __KERNEL__
  5. /*
  6. * SMP- and interrupt-safe semaphores..
  7. *
  8. * Copyright (C) 1996 Linus Torvalds
  9. * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
  10. */
  11. #include <linux/wait.h>
  12. #include <linux/rwsem.h>
  13. #include <asm/assembler.h>
  14. #include <asm/system.h>
  15. #include <asm/atomic.h>
/*
 * Counting semaphore object.
 *
 * count    - number of available "slots"; <= 0 means the semaphore is
 *            held and (-count) tasks may be blocked on it.
 * sleepers - bookkeeping used by the generic slow path to rebalance
 *            count when sleepers race (see __down()/__up() elsewhere).
 * wait     - queue of tasks blocked in the down() slow path.
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};
/*
 * Static initializer for a semaphore with an initial count of @n.
 * @name must be the variable being initialized, so the embedded wait
 * queue head can reference itself.
 */
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
	.count = ATOMIC_INIT(n), \
	.sleepers = 0, \
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

/* Define and statically initialize a semaphore with the given count. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* A mutex is simply a semaphore with a count of one (initially free). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
/* Same, but created already held (count of zero). */
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
  31. static inline void sema_init (struct semaphore *sem, int val)
  32. {
  33. /*
  34. * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
  35. *
  36. * i'd rather use the more flexible initialization above, but sadly
  37. * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
  38. */
  39. atomic_set(&sem->count, val);
  40. sem->sleepers = 0;
  41. init_waitqueue_head(&sem->wait);
  42. }
  43. static inline void init_MUTEX (struct semaphore *sem)
  44. {
  45. sema_init(sem, 1);
  46. }
  47. static inline void init_MUTEX_LOCKED (struct semaphore *sem)
  48. {
  49. sema_init(sem, 0);
  50. }
/*
 * Out-of-line slow paths.  The *_failed entries are arch asm stubs with a
 * non-standard (register-based) calling convention; the __down/__up family
 * are their C back-ends, called only on contention.
 */
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
  59. /*
  60. * Atomically decrement the semaphore's count. If it goes negative,
  61. * block the calling thread in the TASK_UNINTERRUPTIBLE state.
  62. */
  63. static inline void down(struct semaphore * sem)
  64. {
  65. might_sleep();
  66. if (unlikely(atomic_dec_return(&sem->count) < 0))
  67. __down(sem);
  68. }
  69. /*
  70. * Interruptible try to acquire a semaphore. If we obtained
  71. * it, return zero. If we were interrupted, returns -EINTR
  72. */
  73. static inline int down_interruptible(struct semaphore * sem)
  74. {
  75. int result = 0;
  76. might_sleep();
  77. if (unlikely(atomic_dec_return(&sem->count) < 0))
  78. result = __down_interruptible(sem);
  79. return result;
  80. }
/*
 * down_trylock - attempt to acquire @sem without blocking.
 *
 * Returns zero if the semaphore was acquired, non-zero otherwise.
 *
 * The LOCK/UNLOCK sequence below atomically decrements sem->count and
 * leaves the new value in @count; interrupts are disabled around it so
 * the locked load/store pair cannot be split on this CPU.  If the new
 * count is negative we did NOT get the semaphore, and __down_trylock()
 * (defined elsewhere; presumably it undoes the decrement and returns
 * non-zero — confirm against the slow-path implementation) fixes things
 * up.
 */
static inline int down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	long count;
	int result = 0;

	local_irq_save(flags);
	__asm__ __volatile__ (
		"# down_trylock \n\t"
		/* On M32700 TS1 an errata workaround clears the dcache line
		   first, scratching r4 — hence the conditional clobber. */
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #-1; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* Negative new count: contended, take the fix-up slow path. */
	if (unlikely(count < 0))
		result = __down_trylock(sem);

	return result;
}
  109. /*
  110. * Note! This is subtle. We jump to wake people up only if
  111. * the semaphore was negative (== somebody was waiting on it).
  112. * The default case (no contention) will result in NO
  113. * jumps for both down() and up().
  114. */
  115. static inline void up(struct semaphore * sem)
  116. {
  117. if (unlikely(atomic_inc_return(&sem->count) <= 0))
  118. __up(sem);
  119. }
  120. #endif /* __KERNEL__ */
  121. #endif /* _ASM_M32R_SEMAPHORE_H */