/*
 * pda.h -- x86-64 per-CPU data area ("PDA"), addressed through the %gs
 * segment base while the kernel runs.
 */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

/* C declarations only; the guard below hides them from assembly includes. */
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per processor datastructure. %gs points to it while the kernel runs */
/*
 * Field order is ABI: the numeric comments are byte offsets that low-level
 * (assembly) users rely on, and gcc's stack protector hard-codes offset 40
 * for the canary.  Do not reorder or insert fields above the canary.
 */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;		/* top of the per-CPU IRQ stack -- presumably; set in pda_init */
	unsigned int __softirq_pending;	/* per-CPU pending-softirq bitmask */
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;		/* lazy-TLB state -- NOTE(review): semantics live in tlb code, confirm there */
	short isidle;			/* nonzero while the CPU idles -- TODO confirm against idle loop */
	struct mm_struct *active_mm;	/* mm currently active on this CPU */
	unsigned apic_timer_irqs;	/* local APIC timer interrupt count */
	unsigned irq0_irqs;		/* timer (IRQ0) interrupt count */
	unsigned irq_resched_count;	/* reschedule-IPI count */
	unsigned irq_call_count;	/* function-call-IPI count */
	unsigned irq_tlb_count;		/* TLB-shootdown-IPI count */
	unsigned irq_thermal_count;	/* thermal-event interrupt count */
	unsigned irq_threshold_count;	/* MCE-threshold interrupt count */
	unsigned irq_spurious_count;	/* spurious interrupt count */
} ____cacheline_aligned_in_smp;		/* one CPU's PDA never shares a cache line */
/* Per-CPU PDA pointer table, the boot-time backing storage, and setup. */
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];
extern void pda_init(int);

/* PDA of CPU number `i' (any CPU, not just the current one). */
#define cpu_pda(i) (_cpu_pda[i])
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */
/* Deliberately undefined: referencing it on a bad field size breaks the link. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of `field' inside the PDA; usable as an "i" asm operand. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  52. #define pda_to_op(op, field, val) \
  53. do { \
  54. typedef typeof(_proxy_pda.field) T__; \
  55. if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
  56. switch (sizeof(_proxy_pda.field)) { \
  57. case 2: \
  58. asm(op "w %1,%%gs:%c2" : \
  59. "+m" (_proxy_pda.field) : \
  60. "ri" ((T__)val), \
  61. "i"(pda_offset(field))); \
  62. break; \
  63. case 4: \
  64. asm(op "l %1,%%gs:%c2" : \
  65. "+m" (_proxy_pda.field) : \
  66. "ri" ((T__)val), \
  67. "i" (pda_offset(field))); \
  68. break; \
  69. case 8: \
  70. asm(op "q %1,%%gs:%c2": \
  71. "+m" (_proxy_pda.field) : \
  72. "ri" ((T__)val), \
  73. "i"(pda_offset(field))); \
  74. break; \
  75. default: \
  76. __bad_pda_field(); \
  77. } \
  78. } while (0)
/*
 * Read the current CPU's PDA member `field' (%gs-relative) with
 * instruction `op' (mnemonic without size suffix) and yield its value.
 * The w/l/q suffix is chosen from sizeof(field); other sizes fail the
 * link via __bad_pda_field().  The dummy "m" input on _proxy_pda.field
 * orders this read against earlier pda_to_op() writes to the same field.
 */
#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
	ret__;								\
})
/* Convenience accessors for the current CPU's PDA members. */
#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/*
 * Clear bit `bit' of PDA member `field' and yield nonzero iff it was set:
 * btr latches the old bit in CF, then sbbl %0,%0 expands CF to 0 / -1.
 * The "memory" clobber keeps gcc from caching the field around the test.
 */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
#endif	/* !__ASSEMBLY__ */

/*
 * Bytes left free at the top of the kernel stack -- presumably room for
 * the 5-word hardware interrupt frame (ss, rsp, rflags, cs, rip);
 * NOTE(review): confirm against the entry.S users.
 */
#define PDA_STACKOFFSET (5*8)

#endif	/* X86_64_PDA_H */