pda.h

#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/percpu.h>

/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
	unsigned long unused1;
	unsigned long unused2;
	unsigned long unused3;
	unsigned long unused4;
	int unused5;
	unsigned int unused6;		/* 36 was cpunumber */
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
	short in_bootmem;		/* pda lives in bootmem */
} ____cacheline_aligned_in_smp;

DECLARE_PER_CPU(struct x8664_pda, __pda);

extern void pda_init(int);

#define cpu_pda(cpu)		(&per_cpu(__pda, cpu))

#define read_pda(field)		percpu_read(__pda.field)
#define write_pda(field, val)	percpu_write(__pda.field, val)
#define add_pda(field, val)	percpu_add(__pda.field, val)
#define sub_pda(field, val)	percpu_sub(__pda.field, val)
#define or_pda(field, val)	percpu_or(__pda.field, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field)				\
	x86_test_and_clear_bit_percpu(bit, __pda.field)

#endif

#endif /* _ASM_X86_PDA_H */
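
/*
 * Illustrative usage sketch (not part of the original header).  It shows one
 * plausible way kernel code could use the accessors declared above: read_pda()
 * and write_pda() operate on the current CPU's PDA via %gs-relative per-cpu
 * accesses, while cpu_pda(cpu) returns a pointer into another CPU's per-cpu
 * area.  The function pda_example() and its call pattern are hypothetical, and
 * the sketch assumes a kernel context where preemption is disabled around the
 * current-CPU accesses.
 */
#if 0	/* example only, not meant to be compiled as part of this header */
static void pda_example(int cpu)
{
	unsigned long canary;

	/* Read a field of the current CPU's PDA. */
	canary = read_pda(stack_canary);

	/* Write a field of the current CPU's PDA. */
	write_pda(in_bootmem, 0);

	/* Reach a specific CPU's PDA through its per-cpu area. */
	cpu_pda(cpu)->in_bootmem = 0;

	(void)canary;
}
#endif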