locking.c

/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static inline void spin_nested(struct extent_buffer *eb)
{
        spin_lock(&eb->lock);
}

/*
 * Setting a lock to blocking will drop the spinlock and set the
 * flag that forces other procs who want the lock to wait. After
 * this you can safely schedule with the lock held.
 */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
                set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
                spin_unlock(&eb->lock);
        }
        /* exit with the spin lock released and the bit set */
}

/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
        if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
                spin_nested(eb);
                clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
                smp_mb__after_clear_bit();
        }
        /* exit with the spin lock held */
}
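
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that needs to sleep while holding a tree lock, for example to read a
 * block from disk, would typically flip the lock to blocking first and
 * take the spinlock back afterwards.  The wrapper and the sleeping call
 * below are hypothetical; only the btrfs_* locking calls are defined here.
 *
 *      static void example_read_locked(struct extent_buffer *eb)
 *      {
 *              btrfs_tree_lock(eb);            spinning lock, no sleeping yet
 *              btrfs_set_lock_blocking(eb);    drops the spinlock, sets the bit
 *              sleeping_read_of_eb(eb);        safe to schedule() in here
 *              btrfs_clear_lock_blocking(eb);  takes the spinlock back
 *              btrfs_tree_unlock(eb);
 *      }
 */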

/*
 * unfortunately, many of the places that currently set a lock to blocking
 * don't end up blocking for very long, and often they don't block
 * at all. For a dbench 50 run, if we don't spin on the blocking bit
 * at all, the context switch rate can jump up to 400,000/sec or more.
 *
 * So, we're still stuck with this crummy spin on the blocking bit,
 * at least until the most common causes of the short blocks
 * can be dealt with.
 */
static int btrfs_spin_on_block(struct extent_buffer *eb)
{
        int i;

        for (i = 0; i < 512; i++) {
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                if (need_resched())
                        break;
                cpu_relax();
        }
        return 0;
}

/*
 * This is somewhat different from trylock. It will take the
 * spinlock but if it finds the lock is set to blocking, it will
 * return without the lock held.
 *
 * returns 1 if it was able to take the lock and zero otherwise
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_spin_lock(struct extent_buffer *eb)
{
        int i;

        if (btrfs_spin_on_block(eb)) {
                spin_nested(eb);
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                spin_unlock(&eb->lock);
        }

        /* spin for a bit on the BLOCKING flag */
        for (i = 0; i < 2; i++) {
                cpu_relax();
                if (!btrfs_spin_on_block(eb))
                        break;

                spin_nested(eb);
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                spin_unlock(&eb->lock);
        }
        return 0;
}
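
/*
 * Illustrative sketch (not part of the original file): a caller on a
 * hot path might try the cheap spinning path first and fall back to
 * the full lock only when that fails.  Both branches end with the
 * spinlock held, so scheduling is not safe afterwards without
 * btrfs_set_lock_blocking().  The wrapper is hypothetical.
 *
 *      static void example_spin_then_lock(struct extent_buffer *eb)
 *      {
 *              if (!btrfs_try_spin_lock(eb))
 *                      btrfs_tree_lock(eb);
 *              ...                             spinlock held either way
 *      }
 */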

/*
 * the autoremove wake function will return 0 if it tried to wake up
 * a process that was already awake, which means that process won't
 * count as an exclusive wakeup. The waitq code will continue waking
 * procs until it finds one that was actually sleeping.
 *
 * For btrfs, this isn't quite what we want. We want a single proc
 * to be notified that the lock is ready for taking. If that proc
 * already happens to be awake, great, it will loop around and try for
 * the lock.
 *
 * So, btrfs_wake_function always returns 1, even when the proc that we
 * tried to wake up was already awake.
 */
static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
                               int sync, void *key)
{
        autoremove_wake_function(wait, mode, sync, key);
        return 1;
}

/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
        DEFINE_WAIT(wait);
        wait.func = btrfs_wake_function;

        if (!btrfs_spin_on_block(eb))
                goto sleep;

        while (1) {
                spin_nested(eb);

                /* nobody is blocking, exit with the spinlock held */
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 0;

                /*
                 * we have the spinlock, but the real owner is blocking.
                 * wait for them
                 */
                spin_unlock(&eb->lock);

                /*
                 * spin for a bit, and if the blocking flag goes away,
                 * loop around
                 */
                cpu_relax();
                if (btrfs_spin_on_block(eb))
                        continue;
sleep:
                prepare_to_wait_exclusive(&eb->lock_wq, &wait,
                                          TASK_UNINTERRUPTIBLE);

                if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        schedule();

                finish_wait(&eb->lock_wq, &wait);
        }
        return 0;
}

/*
 * Very quick trylock, this does not spin or schedule. It returns
 * 1 with the spinlock held if it was able to take the lock, or it
 * returns zero if it was unable to take the lock.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_tree_lock(struct extent_buffer *eb)
{
        if (spin_trylock(&eb->lock)) {
                if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
                        /*
                         * we've got the spinlock, but the real owner is
                         * blocking. Drop the spinlock and return failure
                         */
                        spin_unlock(&eb->lock);
                        return 0;
                }
                return 1;
        }
        /* someone else has the spinlock, give up */
        return 0;
}
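
/*
 * Illustrative sketch (not part of the original file): because this
 * trylock never spins or sleeps, it suits opportunistic work that can
 * simply be skipped when the buffer is contended.  The caller shown is
 * hypothetical.
 *
 *      static void example_opportunistic(struct extent_buffer *eb)
 *      {
 *              if (!btrfs_try_tree_lock(eb))
 *                      return;                 contended, skip the work
 *              ...                             quick, non-sleeping work
 *              btrfs_tree_unlock(eb);
 *      }
 */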

int btrfs_tree_unlock(struct extent_buffer *eb)
{
        /*
         * if we were a blocking owner, we don't have the spinlock held;
         * just clear the bit and look for waiters
         */
        if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                smp_mb__after_clear_bit();
        else
                spin_unlock(&eb->lock);

        if (waitqueue_active(&eb->lock_wq))
                wake_up(&eb->lock_wq);
        return 0;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                assert_spin_locked(&eb->lock);
}