/* include/asm-x86_64/pda.h — per-CPU data area (PDA) accessed via %gs */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

/* C-only part; assembly includes this header just for the #defines below. */
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* Current process */
	unsigned long data_offset;	/* Per cpu data offset from linker address */
	unsigned long kernelstack;	/* top of kernel stack for current */
	unsigned long oldrsp;		/* user rsp for system call */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
	unsigned long debugstack;	/* #DB/#BP stack. */
#endif
	int irqcount;			/* Irq nesting counter. Starts with -1 */
	int cpunumber;			/* Logical CPU number */
	char *irqstackptr;		/* top of irqstack */
	int nodenumber;			/* number of current node */
	unsigned int __softirq_pending;	/* pending softirq bitmask for this CPU */
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	int mmu_state;			/* NOTE(review): presumably TLB lazy-flush state — confirm against tlb code */
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	unsigned apic_timer_irqs;	/* local APIC timer interrupt count */
} ____cacheline_aligned_in_smp;		/* one cacheline per CPU to avoid false sharing */
/* Per-CPU PDA pointers; boot_cpu_pda backs the early/boot CPUs. */
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];

/* PDA of CPU i, for access from another CPU (not via %gs). */
#define cpu_pda(i) (_cpu_pda[i])
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */

/* Size/type of a struct member without needing an object of that struct. */
#define sizeof_field(type,field) (sizeof(((type *)0)->field))
#define typeof_field(type,field) typeof(((type *)0)->field)

/* Deliberately undefined: referencing it turns an unsupported field size
 * in the pda_*_op() macros into a link-time error. */
extern void __bad_pda_field(void);

/* Byte offset of a PDA field, used as the %gs displacement. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  38. #define pda_to_op(op,field,val) do { \
  39. typedef typeof_field(struct x8664_pda, field) T__; \
  40. switch (sizeof_field(struct x8664_pda, field)) { \
  41. case 2: \
  42. asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  43. case 4: \
  44. asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  45. case 8: \
  46. asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  47. default: __bad_pda_field(); \
  48. } \
  49. } while (0)
/*
 * AK: PDA read accesses should be neither volatile nor have an memory clobber.
 * Unfortunately removing them causes all hell to break lose currently.
 */

/*
 * pda_from_op(op, field) — read a PDA field through %gs with an
 * instruction suffix matched to the field's size; evaluates to the value
 * read.  Unsupported sizes hit the undefined __bad_pda_field() and fail
 * at link time.
 */
#define pda_from_op(op,field) ({ \
	typeof_field(struct x8664_pda, field) ret__; \
	switch (sizeof_field(struct x8664_pda, field)) { \
	case 2: \
		asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	case 4: \
		asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	case 8: \
		asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	default: __bad_pda_field(); \
	} \
	ret__; })
/* Convenience wrappers: operate on the current CPU's PDA field via %gs. */
#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)

#endif	/* !__ASSEMBLY__ */

/* Offset of the kernelstack field, for use from assembly entry code. */
#define PDA_STACKOFFSET (5*8)

#endif	/* X86_64_PDA_H */