system.h

#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <asm/system-um.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
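
/*
 * Illustrative sketch, not from the original header: the classic use of
 * wmb() is to publish a descriptor to a device before the ring index it
 * polls.  "desc", "ring" and "next" are made-up names for structures in
 * coherent DMA memory.
 *
 * <programlisting>
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();
 *	ring->tail = next;
 * </programlisting>
 *
 * The wmb() guarantees the device cannot observe the new tail index
 * before it can observe the completed descriptor.
 */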
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
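
/*
 * Illustrative sketch, not from the original header: the smp_*() variants
 * pair across CPUs.  A minimal producer/consumer example with made-up
 * variables "data" and "ready":
 *
 * <programlisting>
 *	CPU 0 (producer)		CPU 1 (consumer)
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 * </programlisting>
 *
 * smp_wmb() keeps the store to "data" ahead of the store to "ready", and
 * smp_rmb() keeps the load of "ready" ahead of the load of "data".  On
 * !CONFIG_SMP kernels both collapse to barrier(), a pure compiler barrier,
 * which is all that is needed on a single CPU.
 */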
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
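
/*
 * Illustrative sketch, not from the original header: fencing a timed region
 * so the TSC read is neither hoisted above nor sunk below the code being
 * measured.  "do_work()" is a made-up function.
 *
 * <programlisting>
 *	cycles_t t0, t1;
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	rdtsc_barrier();
 *	do_work();
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 *	rdtsc_barrier();
 * </programlisting>
 *
 * t1 - t0 then bounds the cycles spent in do_work(), assuming get_cycles()
 * is backed by RDTSC on this CPU.
 */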
#endif