#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/alternative.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
/* Save and restore the flags so a leaking NT bit is cleared across the switch */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}
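
/*
 * CR0.TS is bit 3 (mask 0x8), so stts() below sets it with a plain
 * read-modify-write of CR0.  With TS set, the next FPU/SSE instruction
 * raises #NM, which is what makes lazy FPU state switching possible;
 * clts() clears the bit again once the state has been restored.
 */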
#define stts() write_cr0(8 | read_cr0())

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
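
/*
 * Naturally aligned 64-bit stores are atomic on x86-64, so set_64bit()
 * above can be a plain assignment; no cmpxchg8b-style sequence is needed
 * as on 32-bit x86.
 */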

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *	   strictly speaking the constraints are not quite right, since
 *	   *ptr is also an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
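
/*
 * Usage sketch (illustrative only): xchg()/tas() can implement a crude
 * test-and-set lock.  'busy' is a hypothetical variable, and a real lock
 * would also need proper release ordering; this shows only the shape.
 *
 *	static volatile long busy;
 *
 *	while (tas(&busy))		// previous value: 1 means already held
 *		cpu_relax();
 *	... critical section ...
 *	busy = 0;			// naive release, for illustration only
 */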

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
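
/*
 * Usage sketch (illustrative only): cmpxchg() is the usual building
 * block for lock-free read-modify-write loops.  'counter' is a
 * hypothetical variable, not part of this header.
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */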

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() do {} while(0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb() asm volatile("sfence" ::: "memory")
#else
#define wmb() asm volatile("" ::: "memory")
#endif

#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
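
/*
 * Pairing sketch (illustrative only): a writer that publishes data via
 * a flag pairs smp_wmb() with the reader's smp_rmb().  'data', 'ready',
 * compute() and use() are hypothetical, not part of this header.
 *
 *	writer:
 *		data = compute();
 *		smp_wmb();	// order the data store before the flag store
 *		ready = 1;
 *
 *	reader:
 *		while (!ready)
 *			cpu_relax();
 *		smp_rmb();	// order the flag read before the data read
 *		use(data);
 */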

/* Provoke a compile-time warning (via a mismatched pointer comparison) if x is not an unsigned long. */
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif