/*
 * pda.h — x86-64 per-CPU data area (PDA) definitions.
 */
  1. #ifndef _ASM_X86_PDA_H
  2. #define _ASM_X86_PDA_H
  3. #ifndef __ASSEMBLY__
  4. #include <linux/stddef.h>
  5. #include <linux/types.h>
  6. #include <linux/cache.h>
  7. #include <linux/threads.h>
  8. #include <asm/page.h>
/* Per-processor data structure.  %gs points to it while the kernel runs. */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per-cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;		/* per-cpu IRQ stack pointer */
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;	/* pending-softirq bitmask */
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	short mmu_state;		/* lazy-TLB state for this CPU */
	short isidle;			/* nonzero while this CPU is idle */
	struct mm_struct *active_mm;	/* mm currently active on this CPU */
	/* Per-CPU interrupt statistics counters. */
	unsigned apic_timer_irqs;	/* local APIC timer interrupts */
	unsigned irq0_irqs;		/* IRQ0 (timer) interrupts */
	unsigned irq_resched_count;	/* reschedule interrupts */
	unsigned irq_call_count;	/* function-call interrupts */
	unsigned irq_tlb_count;		/* TLB-shootdown interrupts */
	unsigned irq_thermal_count;	/* thermal-event interrupts */
	unsigned irq_threshold_count;	/* threshold interrupts */
	unsigned irq_spurious_count;	/* spurious interrupts */
} ____cacheline_aligned_in_smp;
/* One PDA pointer per possible CPU. */
extern struct x8664_pda *_cpu_pda[NR_CPUS];
extern void pda_init(int);

/* Look up the PDA of CPU i. */
#define cpu_pda(i) (_cpu_pda[i])

/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
/* Only referenced from the unreachable default cases of the access macros
 * below; presumably never defined, so a bad field size fails at link time
 * -- confirm no definition exists elsewhere. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of a member within the PDA. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  54. #define pda_to_op(op, field, val) \
  55. do { \
  56. typedef typeof(_proxy_pda.field) T__; \
  57. if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
  58. switch (sizeof(_proxy_pda.field)) { \
  59. case 2: \
  60. asm(op "w %1,%%gs:%c2" : \
  61. "+m" (_proxy_pda.field) : \
  62. "ri" ((T__)val), \
  63. "i"(pda_offset(field))); \
  64. break; \
  65. case 4: \
  66. asm(op "l %1,%%gs:%c2" : \
  67. "+m" (_proxy_pda.field) : \
  68. "ri" ((T__)val), \
  69. "i" (pda_offset(field))); \
  70. break; \
  71. case 8: \
  72. asm(op "q %1,%%gs:%c2": \
  73. "+m" (_proxy_pda.field) : \
  74. "r" ((T__)val), \
  75. "i"(pda_offset(field))); \
  76. break; \
  77. default: \
  78. __bad_pda_field(); \
  79. } \
  80. } while (0)
/*
 * Read PDA member "field" of the current CPU through the %gs segment base
 * and yield it with the member's own type (statement-expression result).
 * The "m" (_proxy_pda.field) input only informs gcc of the dependency; the
 * actual load is from %gs:offset ("%c1" emits the bare constant offset).
 */
#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	default:							\
		/* unsupported field size: fails at link time */	\
		__bad_pda_field();					\
	}								\
	ret__;								\
})
/* Convenience accessors for the current CPU's PDA fields. */
#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/*
 * Test-and-clear bit "bit" in PDA member "field" of the current CPU.
 * btr clears the bit and latches its previous value in CF; "sbbl %0,%0"
 * then expands CF into 0 or -1, so the result is nonzero iff the bit
 * was set.  The "memory" clobber keeps gcc from caching the field.
 */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
  122. #endif
/* Placed outside the !__ASSEMBLY__ block so assembly files can use it.
 * 5*8 == 40 bytes; NOTE(review): presumably a stack/PDA offset consumed by
 * entry assembly -- it matches no commented field offset here except the
 * stack canary's 40, so confirm against the .S users before changing. */
#define PDA_STACKOFFSET (5*8)
#endif /* _ASM_X86_PDA_H */