/* pda.h — x86-64 per-CPU data area ("PDA") definitions and %gs-based accessors. */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

/* C-only section: assembly includers see only the plain numeric
 * constants at the bottom of this file (e.g. PDA_STACKOFFSET). */
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
/*
 * NOTE(review): field offsets appear to be consumed from assembly as raw
 * constants (see PDA_STACKOFFSET below, which is visible to __ASSEMBLY__
 * includers) — confirm against the asm users before reordering or
 * inserting fields.
 */
struct x8664_pda {
	struct task_struct *pcurrent;	/* Current process */
	unsigned long data_offset;	/* Per cpu data offset from linker address */
	unsigned long kernelstack;	/* top of kernel stack for current */
	unsigned long oldrsp;		/* user rsp for system call */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
	unsigned long debugstack;	/* #DB/#BP stack. */
#endif
	int irqcount;			/* Irq nesting counter. Starts with -1 */
	int cpunumber;			/* Logical CPU number */
	char *irqstackptr;		/* top of irqstack */
	int nodenumber;			/* number of current node */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	struct mm_struct *active_mm;
	int mmu_state;
	unsigned apic_timer_irqs;
} ____cacheline_aligned_in_smp;	/* cacheline-aligned to avoid false sharing between CPUs */

/* One PDA per CPU; defined elsewhere, indexed by CPU number. */
extern struct x8664_pda cpu_pda[];
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */

/* Size/type of a struct member without needing an object; the null
 * pointer is never dereferenced because sizeof/typeof are unevaluated. */
#define sizeof_field(type,field) (sizeof(((type *)0)->field))
#define typeof_field(type,field) typeof(((type *)0)->field)

/* Deliberately never defined: a surviving reference (i.e. an unsupported
 * field size in the accessors below) becomes a link-time error. */
extern void __bad_pda_field(void);

/* Byte offset of a PDA field — a compile-time constant, suitable for the
 * "i" constraint in the inline asm below. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  36. #define pda_to_op(op,field,val) do { \
  37. typedef typeof_field(struct x8664_pda, field) T__; \
  38. switch (sizeof_field(struct x8664_pda, field)) { \
  39. case 2: \
  40. asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  41. case 4: \
  42. asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  43. case 8: \
  44. asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
  45. default: __bad_pda_field(); \
  46. } \
  47. } while (0)
/*
 * AK: PDA read accesses should be neither volatile nor have a memory clobber.
 * Unfortunately removing them causes all hell to break loose currently.
 */
/*
 * Read a PDA field of the current CPU through %gs with a single "op"
 * (mov) instruction and yield its value (statement-expression).  Field
 * sizes other than 2/4/8 become a link error via __bad_pda_field().
 * The volatile + "memory" clobber are kept per the note above.
 */
#define pda_from_op(op,field) ({ \
	typeof_field(struct x8664_pda, field) ret__; \
	switch (sizeof_field(struct x8664_pda, field)) { \
	case 2: \
		asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	case 4: \
		asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	case 8: \
		asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
	default: __bad_pda_field(); \
	} \
	ret__; })
/* Public accessors: read or read-modify-write a field of the current
 * CPU's PDA, e.g. read_pda(cpunumber), add_pda(irqcount, 1). */
#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)
#endif	/* !__ASSEMBLY__ */
/* Visible to assembly includers (outside the !__ASSEMBLY__ guard).
 * NOTE(review): presumably the byte offset of a stack-related PDA field
 * for use from asm; 5*8 = 40 is not derivable from this file alone —
 * verify against the struct layout and the asm users. */
#define PDA_STACKOFFSET (5*8)
#endif	/* X86_64_PDA_H */