percpu.h

#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var)	(per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA	__attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif

#define DECLARE_PER_CPU(type, name)				\
	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name)				\
	__attribute__((__section__(".data.percpu")))		\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
	DEFINE_PER_CPU(type, name)
#endif
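
/*
 * Illustrative sketch (not part of the original header): how the declare/define
 * pair above is typically used.  The variable name "ia64_sample_count" is
 * hypothetical; the block is kept under "#if 0" so it is never compiled.
 */
#if 0
/* In a header shared by several translation units: */
DECLARE_PER_CPU(unsigned long, ia64_sample_count);

/* In exactly one .c file; the definition lands in the .data.percpu section: */
DEFINE_PER_CPU(unsigned long, ia64_sample_count);
#endif
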
/*
 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
 * external routine, to avoid include-hell.
 */

#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);

#else /* ! SMP */

#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)		per_cpu__##var
#define __raw_get_cpu_var(var)		per_cpu__##var
#define per_cpu_init()			(__phys_per_cpu_start)

#endif /* SMP */

#define EXPORT_PER_CPU_SYMBOL(var)	EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)	EXPORT_SYMBOL_GPL(per_cpu__##var)

/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var)	(per_cpu__##var)
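
/*
 * Illustrative sketch (not part of the original header): how the access macros
 * above differ.  The per-CPU variable "ia64_sample_count" is hypothetical; the
 * block is kept under "#if 0" so it is never compiled.
 */
#if 0
static void touch_counters(int cpu)
{
	__get_cpu_var(ia64_sample_count)++;	/* this CPU's copy, canonical address */
	per_cpu(ia64_sample_count, cpu) = 0;	/* another CPU's copy, via __per_cpu_offset[cpu] */
	__ia64_per_cpu_var(ia64_sample_count)++;	/* this CPU's copy via the virtually remapped
							 * per-CPU page; cheaper, but do not take its
							 * address (see the comment above) */
}
#endif
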
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PERCPU_H */