/*
 * IA-64 semaphore implementation (derived from x86 version).
 *
 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Semaphores are implemented using a two-way counter: the "count"
 * variable is decremented for each process that tries to acquire the
 * semaphore, while the "sleepers" variable is a count of such
 * acquires.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently
 * test if they need to do any extra work (up needs to do something
 * only if count was negative before the increment operation).
 *
 * "sleepers" and the contention routine ordering are protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */
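
/*
 * For reference, a simplified sketch of the fast path described above.
 * The real inlines live in <asm/semaphore.h> and use IA-64 atomics; the
 * version below is illustrative only and is not compiled.
 */
#if 0
static inline void down (struct semaphore *sem)
{
	/* One atomic decrement; a negative result means contention. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);			/* slow path in this file */
}

static inline void up (struct semaphore *sem)
{
	/* Wake someone only if count was negative before the increment. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);			/* slow path in this file */
}
#endif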

#include <linux/sched.h>
#include <linux/init.h>

#include <asm/errno.h>
#include <asm/semaphore.h>

/*
 * Logic:
 *  - Only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - When we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
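
/*
 * A concrete example of the bookkeeping (illustrative numbers only):
 * with count initialized to 1, task A acquires on the fast path
 * (count 1 -> 0).  Task B then drops count to -1 and enters __down(),
 * bumping sleepers to 1; atomic_add_negative(sleepers - 1, &count)
 * adds 0, count stays negative, so B sleeps with sleepers left at 1.
 * Each further waiter folds its extra decrement back in the same way,
 * so count sits at -1 while anyone waits.  When A calls up(), count
 * goes from -1 to 0, crossing the boundary above and triggering __up().
 */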

void
__up (struct semaphore *sem)
{
	wake_up(&sem->wait);
}

void __sched __down (struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
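
/*
 * Typical caller pattern for the uninterruptible variant, sketched with
 * the DECLARE_MUTEX()/down()/up() API of this kernel generation.
 * Illustrative only; "my_sem" and the critical section are placeholders,
 * not part of this file.
 */
#if 0
static DECLARE_MUTEX(my_sem);		/* semaphore with count == 1 */

static void example_caller (void)
{
	down(&my_sem);			/* may sleep; reaches __down() on contention */
	/* ... critical section ... */
	up(&my_sem);			/* reaches __up() if someone was waiting */
}
#endif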

int __sched __down_interruptible (struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
	return retval;
}
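
/*
 * The interruptible variant hands -EINTR back to the caller, which is
 * commonly translated into -ERESTARTSYS at the syscall level.  A hedged
 * sketch of that pattern ("my_sem" is again a placeholder):
 */
#if 0
static int example_interruptible_caller (void)
{
	if (down_interruptible(&my_sem))
		return -ERESTARTSYS;	/* a signal arrived before we got the semaphore */
	/* ... critical section ... */
	up(&my_sem);
	return 0;
}
#endif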

/*
 * Trylock failed - make sure we correct for having decremented the
 * count.
 */
int
__down_trylock (struct semaphore *sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
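
/*
 * down_trylock() never sleeps: the inline fast path in <asm/semaphore.h>
 * returns 0 when the semaphore was acquired and falls back to
 * __down_trylock() above (which returns 1, i.e. "busy") when it was not.
 * An illustrative caller, with "my_sem" once more a placeholder:
 */
#if 0
static int example_trylock_caller (void)
{
	if (down_trylock(&my_sem))
		return -EBUSY;		/* contended - did not sleep, did not acquire */
	/* ... critical section ... */
	up(&my_sem);
	return 0;
}
#endif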