percpu.h

#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__

#include <linux/compiler.h>
#include <asm/lowcore.h>

#define __GENERIC_PER_CPU

/*
 * s390 uses its own implementation for per cpu data: the offset of
 * the cpu's local data area is cached in that cpu's lowcore memory.
 * For 64 bit module code s390 forces the use of a GOT slot for the
 * address of the per cpu variable, because the module may be loaded
 * more than 4G above the per cpu area, which is beyond the reach of
 * a direct, pc-relative larl.
 */
#if defined(__s390x__) && defined(MODULE)

#define __reloc_hide(var, offset) (*({				\
	extern int simple_identifier_##var(void);		\
	unsigned long *__ptr;					\
	asm ( "larl %0,per_cpu__"#var"@GOTENT"			\
	    : "=a" (__ptr) : "X" (per_cpu__##var) );		\
	(typeof(&per_cpu__##var))((*__ptr) + (offset)); }))

#else

#define __reloc_hide(var, offset) (*({				\
	extern int simple_identifier_##var(void);		\
	unsigned long __ptr;					\
	asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) );	\
	(typeof(&per_cpu__##var)) (__ptr + (offset)); }))

#endif
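
/*
 * Illustrative sketch, not part of the interface: for a hypothetical
 * per cpu variable "foo", __reloc_hide(foo, off) in the generic case
 * expands roughly to
 *
 *	*({
 *		extern int simple_identifier_foo(void);
 *		unsigned long __ptr;
 *		asm ( "" : "=a" (__ptr) : "0" (&per_cpu__foo) );
 *		(typeof(&per_cpu__foo))(__ptr + (off));
 *	})
 *
 * The empty asm hides the variable's address from gcc, so the addition
 * of the cpu offset is performed at the point of use instead of being
 * folded or cached; the 64 bit module variant loads the address from a
 * GOT slot via larl rather than addressing the variable directly.
 */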
#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) \
    __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
    __attribute__((__section__(".data.percpu.shared_aligned"))) \
    __typeof__(type) per_cpu__##name \
    ____cacheline_aligned_in_smp

#define __get_cpu_var(var) __reloc_hide(var, S390_lowcore.percpu_offset)
#define __raw_get_cpu_var(var) __reloc_hide(var, S390_lowcore.percpu_offset)
#define per_cpu(var, cpu) __reloc_hide(var, __per_cpu_offset[cpu])
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)		\
do {							\
	unsigned int __i;				\
	for_each_possible_cpu(__i)			\
		memcpy((pcpudst)+__per_cpu_offset[__i],	\
		       (src), (size));			\
} while (0)
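
/*
 * Sketch of the intended caller (assumed, simplified from the module
 * loader in kernel/module.c), which replicates a module's initial per
 * cpu data into every possible cpu's copy:
 *
 *	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
 *		       sechdrs[pcpuindex].sh_size);
 *
 * Keeping this a macro means the header itself does not have to pull
 * in the cpumask machinery behind for_each_possible_cpu(), hence the
 * "#include hell" note above.
 */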

#else /* ! SMP */

#define DEFINE_PER_CPU(type, name) \
    __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
    DEFINE_PER_CPU(type, name)

#define __get_cpu_var(var) __reloc_hide(var, 0)
#define __raw_get_cpu_var(var) __reloc_hide(var, 0)
#define per_cpu(var, cpu) __reloc_hide(var, 0)

#endif /* SMP */

#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
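
/*
 * Typical usage (a sketch with a made-up variable name; the calling
 * conventions are the generic per cpu ones, not s390 specific):
 *
 *	DEFINE_PER_CPU(int, foo);	(definition, one .c file)
 *	DECLARE_PER_CPU(int, foo);	(declaration, shared header)
 *
 *	__get_cpu_var(foo)++;		(this cpu's copy; the caller
 *					 keeps preemption disabled)
 *	per_cpu(foo, cpu) = 0;		(any given cpu's copy)
 *
 * On SMP both accessors go through __reloc_hide() above, with the
 * offset taken from the lowcore for the local cpu and from
 * __per_cpu_offset[] for a remote one; on UP the offset is simply 0.
 */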

#endif /* __ARCH_S390_PERCPU__ */