/*
 * pda.h -- x86-64 per-CPU data area ("PDA"), accessed through the %gs
 * segment register while the kernel runs.
 */
  1. #ifndef X86_64_PDA_H
  2. #define X86_64_PDA_H
  3. #ifndef __ASSEMBLY__
  4. #include <linux/stddef.h>
  5. #include <linux/types.h>
  6. #include <linux/cache.h>
  7. #include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! (gcc emits the canary
					   check as %gs:40, so the field may
					   not move) */
#endif
	char *irqstackptr;		/* top of the per-CPU interrupt stack */
	unsigned int nodenumber;	/* number of current node */
	unsigned int __softirq_pending;	/* pending softirq bitmask */
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;		/* TLB state for lazy-TLB switching */
	short isidle;			/* nonzero while this CPU is idle */
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	unsigned apic_timer_irqs;	/* interrupt statistics counters */
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
/* One PDA pointer per CPU, indexed by logical CPU number. */
extern struct x8664_pda *_cpu_pda[];
/* Statically allocated PDAs -- presumably used during early boot before
   the dynamic per-CPU areas exist; TODO(review): confirm against setup code. */
extern struct x8664_pda boot_cpu_pda[];
extern void pda_init(int);

/* PDA of CPU i, for access from another CPU (local access goes via %gs). */
#define cpu_pda(i) (_cpu_pda[i])
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
/* Deliberately never defined: referencing it for an unsupported field
   size turns into a link-time error. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of a PDA field; usable as an "i" constraint in asm. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  53. #define pda_to_op(op, field, val) do { \
  54. typedef typeof(_proxy_pda.field) T__; \
  55. if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
  56. switch (sizeof(_proxy_pda.field)) { \
  57. case 2: \
  58. asm(op "w %1,%%gs:%c2" : \
  59. "+m" (_proxy_pda.field) : \
  60. "ri" ((T__)val), \
  61. "i"(pda_offset(field))); \
  62. break; \
  63. case 4: \
  64. asm(op "l %1,%%gs:%c2" : \
  65. "+m" (_proxy_pda.field) : \
  66. "ri" ((T__)val), \
  67. "i" (pda_offset(field))); \
  68. break; \
  69. case 8: \
  70. asm(op "q %1,%%gs:%c2": \
  71. "+m" (_proxy_pda.field) : \
  72. "ri" ((T__)val), \
  73. "i"(pda_offset(field))); \
  74. break; \
  75. default: \
  76. __bad_pda_field(); \
  77. } \
  78. } while (0)
/*
 * Read a PDA field through %gs with instruction "op" and return its value.
 * The dummy "m" (_proxy_pda.field) input tells gcc which object is read,
 * keeping ordering against writes done via pda_to_op().  Not volatile, so
 * the compiler may CSE repeated reads of the same field.
 */
#define pda_from_op(op,field) ({		\
	typeof(_proxy_pda.field) ret__;		\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 4:					\
		asm(op "l %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 8:					\
		asm(op "q %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
	ret__; })
/* Read a PDA field of the current CPU. */
#define read_pda(field) pda_from_op("mov", field)
/* Overwrite a PDA field of the current CPU. */
#define write_pda(field, val) pda_to_op("mov", field, val)
/* Add/subtract/OR into a PDA field; each is a single read-modify-write
   instruction, so it cannot be torn by an interrupt on this CPU. */
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/* Test and clear bit "bit" of a PDA field: btr clears the bit and copies
   its old value into CF, then sbb broadcasts CF so old__ is -1 if the bit
   was set, 0 otherwise.  volatile + "memory" keep it ordered as a
   bit-lock-style primitive. */
#define test_and_clear_bit_pda(bit, field) ({	\
	int old__;				\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
		     : "=r" (old__), "+m" (_proxy_pda.field) \
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
	old__;					\
})
#endif	/* !__ASSEMBLY__ */

/* Offset constant shared with assembly code (which cannot use offsetof);
   presumably tied to the PDA/stack layout above -- TODO(review): verify
   against the entry/boot assembly that uses it. */
#define PDA_STACKOFFSET (5*8)

#endif	/* X86_64_PDA_H */