percpu.h

#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <asm/percpu.h>

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#ifdef MODULE
#define SHARED_ALIGNED_SECTION ".data.percpu"
#else
#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
#endif

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.page_aligned"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif
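
/*
 * Illustrative sketch (not in the original header): defining and
 * bumping a static per-CPU counter.  The name "my_counter" is
 * hypothetical; get_cpu_var()/put_cpu_var() are defined below and
 * bracket the access with preempt_disable()/preempt_enable().
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	static void bump_counter(void)
 *	{
 *		get_cpu_var(my_counter)++;
 *		put_cpu_var(my_counter);
 *	}
 */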
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */
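
/*
 * Reading the arithmetic above: __per_cpu_start and __per_cpu_end are
 * linker-script symbols bounding the static .data.percpu section, so
 * PERCPU_ENOUGH_ROOM is the size of all built-in per-CPU data plus an
 * 8 KiB reserve for variables defined by loadable modules.
 */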
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({ \
	extern int simple_identifier_##var(void); \
	preempt_disable(); \
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
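
/*
 * How the statement expression above achieves that: the dummy extern
 * declaration compiles only when @var is a plain identifier; the block
 * then disables preemption and evaluates to the address of this CPU's
 * copy, which the leading '*' turns back into an lvalue that stays
 * valid until the matching put_cpu_var().
 */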
#ifdef CONFIG_SMP

struct percpu_data {
	void *ptrs[1];
};

#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
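
/*
 * Annotation (not in the original header): percpu_alloc() hands back
 * the bit-flipped address of the percpu_data descriptor rather than a
 * real pointer, so code that dereferences the cookie directly instead
 * of going through percpu_ptr() will generally fault; applying ~ a
 * second time recovers the true descriptor address.
 */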
/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object. Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define percpu_ptr(ptr, cpu) \
({ \
	struct percpu_data *__p = __percpu_disguise(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)]; \
})

extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);

#else /* CONFIG_SMP */

#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp,
						 cpumask_t *mask)
{
	return kzalloc(size, gfp);
}

static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */

#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */
#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
						  cpu_possible_map)
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr)	percpu_free((ptr))

#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
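
/*
 * Illustrative sketch (not in the original header): dynamic per-CPU
 * allocation.  "struct my_stats" and its "hits" field are
 * hypothetical; for_each_possible_cpu() comes from <linux/cpumask.h>
 * and matches the cpu_possible_map used by __alloc_percpu() above.
 *
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *	int cpu;
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(stats, cpu)->hits = 0;
 *	...
 *	free_percpu(stats);
 */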
#endif /* __LINUX_PERCPU_H */