semaphore.c

/*
 * AVR32 semaphore implementation.
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/i386/kernel/semaphore.c
 * Copyright (C) 1999 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/semaphore.h>
#include <asm/atomic.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
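
/*
 * For reference, a rough sketch of that fast path (illustrative only;
 * the actual inline definitions live in <asm/semaphore.h> and may
 * differ in detail):
 *
 *	down():	if (atomic_sub_return(1, &sem->count) < 0)
 *			__down(sem);		(contended: go to sleep here)
 *	up():	if (atomic_add_return(1, &sem->count) <= 0)
 *			__up(sem);		(a sleeper exists: wake it)
 *
 * So by the time __down()/__up() run, "count" has already been
 * decremented/incremented by the caller.
 */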

/*
 * Logic:
 * - only on a boundary condition do we need to care. When we go
 *   from a negative count to a non-negative, we wake people up.
 * - when we go from a non-negative count to a negative, we must
 *   (a) synchronize with the "sleepers" count and (b) make sure
 *   that we're on the wakeup list before we synchronize so that
 *   we cannot lose wakeup events.
 */
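
/*
 * Worked example (hypothetical scenario, "count" initialised to 1):
 * A takes the fast path (count 1 -> 0).  B and C both decrement and
 * enter __down() (count -2, sleepers 2).  Each waiter, while holding
 * the wait-queue lock, folds "everybody else" back in by adding
 * sleepers - 1 to count before testing it; with two waiters that
 * leaves count at -1 and sleepers at 1.  When A calls up()
 * (count -1 -> 0) one exclusive waiter is woken, adds sleepers - 1 = 0,
 * sees count >= 0, clears sleepers and owns the semaphore; the
 * wake_up_locked() at the end of __down() then lets the next sleeper
 * re-check the count.
 */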

/*
 * Called from up() when "count" was negative before the increment,
 * i.e. somebody may be sleeping on the semaphore: wake one exclusive
 * waiter and let it fix up the sleeper accounting in __down().
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);

/*
 * Slow path of down(): only reached when there is contention (see the
 * comments above).  Puts the caller on the wait queue and loops until
 * the sleeper accounting says we own the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
EXPORT_SYMBOL(__down);

/*
 * Same as __down(), except that a pending signal aborts the wait and
 * makes the caller return -EINTR instead of acquiring the semaphore.
 */
int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into the trylock
		 * failure case - we won't be sleeping, and we can't
		 * get the lock as it has contention. Just correct the
		 * count and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;

	return retval;
}
EXPORT_SYMBOL(__down_interruptible);
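
/*
 * Usage sketch (illustrative only, not part of this file); "my_sem" is
 * a hypothetical semaphore created with the 2.6-era DECLARE_MUTEX()
 * macro, i.e. with count initialised to 1:
 *
 *	static DECLARE_MUTEX(my_sem);
 *
 *	down(&my_sem);			may call __down() on contention
 *	... critical section ...
 *	up(&my_sem);			may call __up() if somebody sleeps
 *
 *	if (down_interruptible(&my_sem))	interrupted by a signal:
 *		return -EINTR;			__down_interruptible() returned -EINTR
 */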