system_64.h

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
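
/*
 * Note (descriptive, not from the original source): __SAVE/__RESTORE emit
 * inline-asm fragments that spill or reload the named 64-bit register to a
 * fixed slot (14 - offset) quadwords above %rsp, for asm sequences that lay
 * out a small register save area on the stack.
 */
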
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags to clear a leaking NT bit */
#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT                                               \
                     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */     \
                     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */  \
                     "call __switch_to\n\t"                                     \
                     ".globl thread_return\n"                                   \
                     "thread_return:\n\t"                                       \
                     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                     \
                     "movq %P[thread_info](%%rsi),%%r8\n\t"                     \
                     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"       \
                     "movq %%rax,%%rdi\n\t"                                     \
                     "jc ret_from_fork\n\t"                                     \
                     RESTORE_CONTEXT                                            \
                     : "=a" (last)                                              \
                     : [next] "S" (next), [prev] "D" (prev),                    \
                       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                       [ti_flags] "i" (offsetof(struct thread_info, flags)),    \
                       [tif_fork] "i" (TIF_FORK),                               \
                       [thread_info] "i" (offsetof(struct task_struct, stack)), \
                       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
                     : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %k0,%%" #seg "\n"         \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "movl %1,%%" #seg "\n\t"        \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 8\n\t"                  \
                ".quad 1b,3b\n"                 \
                ".previous"                     \
                : :"r" (value), "r" (0))
/*
 * Clear and set the 'TS' bit in CR0, respectively (clts() here, stts() below)
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr2(void)
{
        unsigned long cr2;
        asm volatile("movq %%cr2,%0" : "=r" (cr2));
        return cr2;
}

static inline void write_cr2(unsigned long val)
{
        asm volatile("movq %0,%%cr2" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;
        asm volatile("movq %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm volatile("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

static inline unsigned long read_cr8(void)
{
        unsigned long cr8;
        asm volatile("movq %%cr8,%0" : "=r" (cr8));
        return cr8;
}

static inline void write_cr8(unsigned long val)
{
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#define stts() write_cr0(8 | read_cr0())
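
/*
 * Note (descriptive, not from the original source): stts() sets CR0.TS
 * (bit 3, hence the 8 above) so the next FPU/SSE instruction raises a
 * device-not-available fault, which drives lazy FPU context switching;
 * clts() clears it again before the saved FPU state is restored.
 */
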
#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory")

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
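
/*
 * Sketch (hypothetical helper, not defined here): flushing a byte range one
 * cache line at a time, assuming a 64-byte line size, with a fence so the
 * flushes are ordered against later accesses:
 *
 *	static inline void clflush_range(void *addr, unsigned long len)
 *	{
 *		char *p = (char *)((unsigned long)addr & ~63UL);
 *		char *end = (char *)addr + len;
 *
 *		for (; p < end; p += 64)
 *			clflush(p);
 *		mb();
 *	}
 */
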
#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif
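
/*
 * Pairing sketch (illustrative): a producer publishes data and then a flag,
 * a consumer checks the flag before reading the data; the smp_wmb()/smp_rmb()
 * pair keeps the two sides ordered on SMP (and, per the definitions above,
 * compiles down to plain compiler barriers on x86-64):
 *
 *	producer:
 *		data = 42;
 *		smp_wmb();
 *		ready = 1;
 *
 *	consumer:
 *		if (ready) {
 *			smp_rmb();
 *			use(data);
 *		}
 */
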
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
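
/*
 * Note (descriptive, not from the original source): set_mb() stores 'value'
 * into 'var' and acts as a full memory barrier, since the xchg instruction
 * used for the store is implicitly locked on x86.
 */
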
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif /* __ASM_SYSTEM_H */