/*
 * pda.h — x86-64 per-CPU data area (PDA): the per-processor structure
 * reached through %gs while the kernel runs, plus its accessor macros.
 */
#ifndef ASM_X86__PDA_H
#define ASM_X86__PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/*
 * Per processor datastructure. %gs points to it while the kernel runs.
 *
 * NOTE(review): the numeric comments are byte offsets relied on by
 * assembly and by the compiler ABI — do not reorder fields casually.
 */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;		/* top of the per-CPU IRQ stack */
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;
	short isidle;
	struct mm_struct *active_mm;
	/* per-CPU interrupt statistics counters */
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
/* Array of PDA pointers, indexed by logical CPU number. */
extern struct x8664_pda **_cpu_pda;
extern void pda_init(int);

/* PDA of CPU @i (valid once _cpu_pda[] has been populated). */
#define cpu_pda(i) (_cpu_pda[i])
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */
/* Never defined; referencing it turns an unsupported field size into a
   link-time error. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of @field within struct x8664_pda. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  53. #define pda_to_op(op, field, val) \
  54. do { \
  55. typedef typeof(_proxy_pda.field) T__; \
  56. if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
  57. switch (sizeof(_proxy_pda.field)) { \
  58. case 2: \
  59. asm(op "w %1,%%gs:%c2" : \
  60. "+m" (_proxy_pda.field) : \
  61. "ri" ((T__)val), \
  62. "i"(pda_offset(field))); \
  63. break; \
  64. case 4: \
  65. asm(op "l %1,%%gs:%c2" : \
  66. "+m" (_proxy_pda.field) : \
  67. "ri" ((T__)val), \
  68. "i" (pda_offset(field))); \
  69. break; \
  70. case 8: \
  71. asm(op "q %1,%%gs:%c2": \
  72. "+m" (_proxy_pda.field) : \
  73. "ri" ((T__)val), \
  74. "i"(pda_offset(field))); \
  75. break; \
  76. default: \
  77. __bad_pda_field(); \
  78. } \
  79. } while (0)
/*
 * Read PDA member @field of the current CPU with instruction @op
 * ("mov"), addressed %gs-relative; evaluates to the value read.
 * The "m" (_proxy_pda.field) dummy input tells gcc the read depends on
 * prior writes to that member, ordering it against pda_to_op() stores.
 * Unsupported member sizes become a link error via __bad_pda_field().
 */
#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0":					\
		    "=r" (ret__):					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0":					\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
	ret__;								\
})
/* Read a PDA member of the current CPU. */
#define read_pda(field) pda_from_op("mov", field)
/* Overwrite a PDA member of the current CPU. */
#define write_pda(field, val) pda_to_op("mov", field, val)
/* Arithmetic/bitwise read-modify-write on a PDA member (single insn,
   but NOT atomic vs. preemption moving the task to another CPU). */
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/*
 * Clear bit @bit in PDA member @field and evaluate to its previous
 * value: btr sets CF to the old bit, sbbl materializes CF as 0/-1.
 */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
#endif	/* !__ASSEMBLY__ */

/* Kept outside the !__ASSEMBLY__ section so assembly code can use it.
   NOTE(review): 5*8 = 40 bytes — presumably the stack adjustment used by
   the entry code; confirm against the .S users before changing. */
#define PDA_STACKOFFSET (5*8)

#endif /* ASM_X86__PDA_H */