percpu.h

#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg gs
#define __percpu_mov_op movq
#else
#define __percpu_seg fs
#define __percpu_mov_op movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
        __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
        lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg) \
        __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
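
/*
 * Illustrative use (editorial addition; "some_percpu_var" is a placeholder,
 * not a variable declared here): assembly code typically uses PER_CPU_VAR()
 * as a segment-relative memory operand, e.g.
 *
 *	movq %rax, PER_CPU_VAR(some_percpu_var)
 *
 * which becomes a %gs-relative (64-bit) or %fs-relative (32-bit) access on
 * SMP, and a plain absolute symbol reference on UP builds.
 */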

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset percpu_read(this_cpu_off)
#else
#define __percpu_arg(x) "%" #x
#endif
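
/*
 * Expansion sketch (editorial addition): with CONFIG_SMP on 64-bit,
 * __percpu_arg(0) expands to the string "%%gs:%P0", so an asm template
 * such as
 *
 *	asm("movq %1," __percpu_arg(0) : "+m" (var) : "re" (val));
 *
 * addresses the per-cpu variable through the %gs segment base (%fs on
 * 32-bit); without CONFIG_SMP the plain "%0" operand is emitted instead.
 */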

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);
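
/*
 * Note on percpu_to_op() below (editorial): the "if (0)" block is never
 * executed and generates no code; assigning (val) to a temporary of the
 * per-cpu variable's type simply lets the compiler check that the value
 * and the variable have assignment-compatible types before the size
 * switch picks the instruction width.
 */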

#define percpu_to_op(op, var, val) \
do { \
        typedef typeof(var) T__; \
        if (0) { \
                T__ tmp__; \
                tmp__ = (val); \
        } \
        switch (sizeof(var)) { \
        case 1: \
                asm(op "b %1,"__percpu_arg(0) \
                    : "+m" (var) \
                    : "ri" ((T__)val)); \
                break; \
        case 2: \
                asm(op "w %1,"__percpu_arg(0) \
                    : "+m" (var) \
                    : "ri" ((T__)val)); \
                break; \
        case 4: \
                asm(op "l %1,"__percpu_arg(0) \
                    : "+m" (var) \
                    : "ri" ((T__)val)); \
                break; \
        case 8: \
                asm(op "q %1,"__percpu_arg(0) \
                    : "+m" (var) \
                    : "re" ((T__)val)); \
                break; \
        default: __bad_percpu_size(); \
        } \
} while (0)

#define percpu_from_op(op, var) \
({ \
        typeof(var) ret__; \
        switch (sizeof(var)) { \
        case 1: \
                asm(op "b "__percpu_arg(1)",%0" \
                    : "=r" (ret__) \
                    : "m" (var)); \
                break; \
        case 2: \
                asm(op "w "__percpu_arg(1)",%0" \
                    : "=r" (ret__) \
                    : "m" (var)); \
                break; \
        case 4: \
                asm(op "l "__percpu_arg(1)",%0" \
                    : "=r" (ret__) \
                    : "m" (var)); \
                break; \
        case 8: \
                asm(op "q "__percpu_arg(1)",%0" \
                    : "=r" (ret__) \
                    : "m" (var)); \
                break; \
        default: __bad_percpu_size(); \
        } \
        ret__; \
})

#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
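
/*
 * Usage sketch (editorial addition; "nr_foo" is a hypothetical per-cpu
 * variable, not one declared by this header):
 *
 *	DEFINE_PER_CPU(unsigned long, nr_foo);
 *
 *	percpu_add(nr_foo, 1);
 *	unsigned long n = percpu_read(nr_foo);
 *
 * Each accessor compiles to a single %fs/%gs-relative instruction (the
 * percpu_add() above becomes roughly "add $1,%gs:per_cpu__nr_foo" on
 * 64-bit), but callers still normally need preemption disabled so that
 * consecutive accesses hit the same CPU's copy.
 */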

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
        int old__; \
        asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
                     : "=r" (old__), "+m" (per_cpu__##var) \
                     : "dIr" (bit)); \
        old__; \
})
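
/*
 * Usage sketch (editorial addition; "pending_work" and handle_work() are
 * hypothetical names):
 *
 *	DEFINE_PER_CPU(unsigned long, pending_work);
 *
 *	preempt_disable();
 *	if (x86_test_and_clear_bit_percpu(3, pending_work))
 *		handle_work();
 *	preempt_enable();
 *
 * The btr/sbb pair tests and clears the bit only in this CPU's copy and is
 * not atomic against other CPUs, hence the preemption-off requirement noted
 * above.
 */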

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
        DEFINE_PER_CPU(_type, _name) = _initvalue; \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
                { [0 ... NR_CPUS-1] = _initvalue }; \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
        DECLARE_PER_CPU(_type, _name); \
        extern __typeof__(_type) *_name##_early_ptr; \
        extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
        *(early_per_cpu_ptr(_name) ? \
                &early_per_cpu_ptr(_name)[_cpu] : \
                &per_cpu(_name, _cpu))
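
/*
 * Usage sketch (editorial addition; the pattern mirrors how early per-cpu
 * maps such as x86_cpu_to_apicid are handled elsewhere in arch/x86):
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 *	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 *
 * While _name##_early_ptr is still set, early_per_cpu() reads and writes
 * the static _early_map[] array; once the real per-cpu areas are set up
 * and the early pointer is cleared, the same expression falls through to
 * the normal per_cpu() access.
 */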

#else /* !CONFIG_SMP */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
        DECLARE_PER_CPU(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */