percpu.h

#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
#include <linux/spinlock.h> /* For preempt_disable() */
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/string.h> /* For memset() */
#include <asm/percpu.h>

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM 32768
#endif
/*
 * Must be an lvalue.  get_cpu_var() disables preemption so the current
 * CPU's instance stays stable until the matching put_cpu_var(), which
 * re-enables preemption.
 */
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
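
/*
 * Illustrative sketch (not part of the original header): typical use of
 * get_cpu_var()/put_cpu_var() on a static per-cpu variable.  The variable
 * "example_count" and the helper below are hypothetical names; preemption
 * is disabled between the two calls, so the increment hits this CPU's copy.
 *
 *	static DEFINE_PER_CPU(unsigned long, example_count);
 *
 *	static void example_bump(void)
 *	{
 *		get_cpu_var(example_count)++;
 *		put_cpu_var(example_count);
 *	}
 */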

#ifdef CONFIG_SMP

struct percpu_data {
	void *ptrs[NR_CPUS];	/* one pointer per CPU to that CPU's copy */
	void *blkp;
};

/*
 * Use this to get to a cpu's version of the per-cpu object allocated using
 * alloc_percpu.  Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 *
 * __alloc_percpu() returns the struct percpu_data pointer bitwise-inverted
 * to catch dereferences that bypass this wrapper; the ~ below undoes that
 * inversion before indexing by CPU.
 */
#define per_cpu_ptr(ptr, cpu)                   \
({                                              \
	struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)];      \
})

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(const void *);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) (ptr)

static inline void *__alloc_percpu(size_t size, size_t align)
{
	void *ret = kmalloc(size, GFP_KERNEL);
	if (ret)
		memset(ret, 0, size);
	return ret;
}

static inline void free_percpu(const void *ptr)
{
	kfree(ptr);
}

#endif /* CONFIG_SMP */

/* Simple wrapper for the common case: zeros memory. */
#define alloc_percpu(type) \
	((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
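
/*
 * Illustrative sketch (not part of the original header): allocating and
 * using a dynamic per-cpu object.  "struct example_stats", "example_init"
 * and "example_inc" are hypothetical names; the get_cpu()/put_cpu() pair
 * keeps the task on one CPU while its copy is touched, as recommended in
 * the per_cpu_ptr() comment above.
 *
 *	struct example_stats { unsigned long hits; };
 *	static struct example_stats *example_stats;
 *
 *	static int example_init(void)
 *	{
 *		example_stats = alloc_percpu(struct example_stats);
 *		return example_stats ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_inc(void)
 *	{
 *		int cpu = get_cpu();
 *		per_cpu_ptr(example_stats, cpu)->hits++;
 *		put_cpu();
 *	}
 *
 * The per-CPU copies are released again with free_percpu(example_stats).
 */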

#endif /* __LINUX_PERCPU_H */