/* barrier.h — m32r memory barrier definitions (size/line-number scraping artifacts removed) */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
  7. * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
  8. */
  9. #ifndef _ASM_M32R_BARRIER_H
  10. #define _ASM_M32R_BARRIER_H
  11. #define nop() __asm__ __volatile__ ("nop" : : )
  12. /*
  13. * Memory barrier.
  14. *
  15. * mb() prevents loads and stores being reordered across this point.
  16. * rmb() prevents loads being reordered across this point.
  17. * wmb() prevents stores being reordered across this point.
  18. */
  19. #define mb() barrier()
  20. #define rmb() mb()
  21. #define wmb() mb()
  22. /**
  23. * read_barrier_depends - Flush all pending reads that subsequents reads
  24. * depend on.
  25. *
  26. * No data-dependent reads from memory-like regions are ever reordered
  27. * over this barrier. All reads preceding this primitive are guaranteed
  28. * to access memory (but not necessarily other CPUs' caches) before any
  29. * reads following this primitive that depend on the data return by
  30. * any of the preceding reads. This primitive is much lighter weight than
  31. * rmb() on most CPUs, and is never heavier weight than is
  32. * rmb().
  33. *
  34. * These ordering constraints are respected by both the local CPU
  35. * and the compiler.
  36. *
  37. * Ordering is not guaranteed by anything other than these primitives,
  38. * not even by data dependencies. See the documentation for
  39. * memory_barrier() for examples and URLs to more information.
  40. *
  41. * For example, the following code would force ordering (the initial
  42. * value of "a" is zero, "b" is one, and "p" is "&a"):
  43. *
  44. * <programlisting>
  45. * CPU 0 CPU 1
  46. *
  47. * b = 2;
  48. * memory_barrier();
  49. * p = &b; q = p;
  50. * read_barrier_depends();
  51. * d = *q;
  52. * </programlisting>
  53. *
  54. *
  55. * because the read of "*q" depends on the read of "p" and these
  56. * two reads are separated by a read_barrier_depends(). However,
  57. * the following code, with the same initial values for "a" and "b":
  58. *
  59. * <programlisting>
  60. * CPU 0 CPU 1
  61. *
  62. * a = 2;
  63. * memory_barrier();
  64. * b = 3; y = b;
  65. * read_barrier_depends();
  66. * x = a;
  67. * </programlisting>
  68. *
  69. * does not enforce ordering, since there is no data dependency between
  70. * the read of "a" and the read of "b". Therefore, on some CPUs, such
  71. * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
  72. * in cases like this where there are no data dependencies.
  73. **/
  74. #define read_barrier_depends() do { } while (0)
  75. #ifdef CONFIG_SMP
  76. #define smp_mb() mb()
  77. #define smp_rmb() rmb()
  78. #define smp_wmb() wmb()
  79. #define smp_read_barrier_depends() read_barrier_depends()
  80. #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  81. #else
  82. #define smp_mb() barrier()
  83. #define smp_rmb() barrier()
  84. #define smp_wmb() barrier()
  85. #define smp_read_barrier_depends() do { } while (0)
  86. #define set_mb(var, value) do { var = value; barrier(); } while (0)
  87. #endif
  88. #endif /* _ASM_M32R_BARRIER_H */