percpu.h

#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *	var - variable name
 *	reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *	PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)						\
	__percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif	/* SMP */
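
/*
 * Illustrative only (not part of the original header): loading the
 * current CPU's copy of the per-cpu variable "cpu_number" from
 * assembly might look like
 *
 *	movl PER_CPU_VAR(cpu_number), %eax
 *
 * which on 64-bit SMP assembles to "movl %gs:per_cpu__cpu_number, %eax".
 */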

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%" #x
#endif
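
/*
 * Note (added for clarity): on 64-bit SMP, __percpu_arg(0) expands to
 * the string "%%gs:%P0", so an asm template ends up addressing operand
 * 0 relative to the per-cpu segment base.  The "%P" operand modifier
 * prints the bare symbol address, allowing the explicit segment prefix
 * to be prepended.
 */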

/*
 * Compile-time-initialized pointers to per-cpu variables that the boot
 * processor needs must use these macros so that they resolve to the
 * proper address, offset from __per_cpu_load, on SMP.
 *
 * A matching entry must also exist in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	per_cpu_var(var)
#endif
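
/*
 * Illustrative only: a boot-time reference to the per-cpu GDT page
 * might be declared and used as
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *	addr = (unsigned long)&init_per_cpu_var(gdt_page);
 *
 * On 64-bit SMP this resolves to the init_per_cpu__gdt_page copy in
 * the initial per-cpu area rather than a runtime per-cpu address.
 */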

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);
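
/*
 * Note (added for clarity): the dead "if (0)" assignment below exists
 * only so the compiler type-checks "val" against typeof(var); it
 * generates no code.  The input constraints pick registers legal for
 * each operand size: "qi" = byte-addressable register or immediate,
 * "ri" = any general register or immediate, "re" = register or 32-bit
 * sign-extended immediate (64-bit instructions cannot take 64-bit
 * immediates).
 */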
#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) T__;			\
	if (0) {					\
		T__ tmp__;				\
		tmp__ = (val);				\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

#define percpu_from_op(op, var)				\
({							\
	typeof(var) ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	ret__;						\
})

#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
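
/*
 * Illustrative only: given a per-cpu counter declared elsewhere as
 *
 *	DEFINE_PER_CPU(unsigned long, foo_count);
 *
 * arch code could touch the local CPU's copy in a single instruction:
 *
 *	percpu_add(foo_count, 1);	addq $1,%gs:per_cpu__foo_count
 *	n = percpu_read(foo_count);	movq %gs:per_cpu__foo_count,%reg
 *
 * "foo_count" is a made-up name; any DEFINE_PER_CPU variable works.
 */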

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (per_cpu__##var)		\
		     : "dIr" (bit));					\
	old__;								\
})
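
/*
 * Illustrative only: the caller must keep preemption off so the task
 * cannot migrate between the segment-relative access and the btr, e.g.
 *
 *	preempt_disable();
 *	if (x86_test_and_clear_bit_percpu(0, some_percpu_flags))
 *		handle_pending_work();
 *	preempt_enable();
 *
 * "some_percpu_flags" and "handle_pending_work" are made-up names.
 */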

#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
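
/*
 * Note (added for clarity): this_cpu_off holds each CPU's offset from
 * the start of the per-cpu area; __my_cpu_offset above reads it through
 * the %fs/%gs segment, which is why per-cpu accesses need no explicit
 * smp_processor_id() lookup.
 */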

#ifdef CONFIG_NEED_MULTIPLE_NODES
void *pcpu_lpage_remapped(void *kaddr);
#else
static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
	  &early_per_cpu_ptr(_name)[_cpu] :			\
	  &per_cpu(_name, _cpu))
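
/*
 * Illustrative only: the APIC ID map is a typical user.  Something like
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * can be read via early_per_cpu(x86_cpu_to_apicid, cpu) both before and
 * after the real per-cpu areas exist; once they are set up, the
 * _early_ptr is cleared and the macro falls through to per_cpu().
 */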

#else	/* !CONFIG_SMP */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */