/* system.h */

#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
#ifdef CONFIG_X86_32

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * but also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) \
do { \
        /* \
         * Context-switching clobbers all registers, so we clobber \
         * them explicitly, via unused output variables. \
         * (EAX and EBP are not listed because EBP is saved/restored \
         *  explicitly for wchan access and EAX is the return value of \
         *  __switch_to()) \
         */ \
        unsigned long ebx, ecx, edx, esi, edi; \
 \
        asm volatile("pushfl\n\t" /* save flags */ \
                     "pushl %%ebp\n\t" /* save EBP */ \
                     "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
                     "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
                     "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
                     "pushl %[next_ip]\n\t" /* restore EIP */ \
                     "jmp __switch_to\n" /* regparm call */ \
                     "1:\t" \
                     "popl %%ebp\n\t" /* restore EBP */ \
                     "popfl\n" /* restore flags */ \
 \
                     /* output parameters */ \
                     : [prev_sp] "=m" (prev->thread.sp), \
                       [prev_ip] "=m" (prev->thread.ip), \
                       "=a" (last), \
 \
                       /* clobbered output registers: */ \
                       "=b" (ebx), "=c" (ecx), "=d" (edx), \
                       "=S" (esi), "=D" (edi) \
 \
                       /* input parameters: */ \
                     : [next_sp] "m" (next->thread.sp), \
                       [next_ip] "m" (next->thread.ip), \
 \
                       /* regparm parameters for __switch_to(): */ \
                       [prev] "a" (prev), \
                       [next] "d" (next) \
 \
                     : /* reloaded segment registers */ \
                       "memory"); \
} while (0)
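
/*
 * Illustrative note (not part of the original header): the core scheduler is
 * expected to invoke this macro roughly as
 *
 *      switch_to(prev, next, prev);
 *
 * where "last" (here reusing prev) receives EAX, i.e. the return value of
 * __switch_to(): the task we actually switched away from. That matters
 * because by the time this task runs again, the locals on its stack are
 * stale, and only the returned value tells it which task it really came
 * from.
 */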

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
          "r12", "r13", "r14", "r15"

/* Save and restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
             "call __switch_to\n\t" \
             ".globl thread_return\n" \
             "thread_return:\n\t" \
             "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
             "movq %P[thread_info](%%rsi),%%r8\n\t" \
             LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
             "movq %%rax,%%rdi\n\t" \
             "jc ret_from_fork\n\t" \
             RESTORE_CONTEXT \
             : "=a" (last) \
             : [next] "S" (next), [prev] "D" (prev), \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)), \
               [tif_fork] "i" (TIF_FORK), \
               [thread_info] "i" (offsetof(struct task_struct, stack)), \
               [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
             : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
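
/*
 * Note (illustrative, not part of the original header): these macros patch
 * the raw bytes of an 8-byte segment descriptor in place. For example, with
 * base = 0x12345678, _set_base() stores
 *
 *      addr[2..3] = 0x5678     (base 15:0)
 *      addr[4]    = 0x34       (base 23:16)
 *      addr[7]    = 0x12       (base 31:24)
 *
 * while _set_limit() writes limit bits 15:0 into addr[0..1] and merges bits
 * 19:16 into the low nibble of addr[6]. set_limit() passes (limit)-1 because
 * the hardware limit field is inclusive.
 */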

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value) \
        asm volatile("\n" \
                     "1:\t" \
                     "movl %k0,%%" #seg "\n" \
                     "2:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3:\t" \
                     "movl %k1, %%" #seg "\n\t" \
                     "jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE(1b,3b) \
                     : :"r" (value), "r" (0) : "memory")

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
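
/*
 * Illustrative usage (not part of the original header), in the style of the
 * 32-bit context-switch code; the ->gs field is shown only as an example:
 *
 *      savesegment(gs, prev->gs);      stash the old %gs selector
 *      loadsegment(gs, next->gs);      reload %gs; a faulting selector is
 *                                      silently replaced by 0 through the
 *                                      .fixup/_ASM_EXTABLE path above
 */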

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
        return __limit + 1;
}

static inline void native_clts(void)
{
        asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr2(unsigned long val)
{
        asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /* This could fault if %cr4 does not exist. On x86_64, CR4 always
         * exists, so it will never fail. */
#ifdef CONFIG_X86_32
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
        val = native_read_cr4();
#endif
        return val;
}

static inline void native_write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
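
/*
 * Illustrative sketch (not part of the original header): the __force_order
 * dummy ties these accessors together so the compiler cannot reorder them
 * against each other. A sequence in the style of a global TLB flush relies
 * on exactly that ordering (X86_CR4_PGE is assumed to be defined elsewhere):
 *
 *      cr4 = read_cr4();
 *      write_cr4(cr4 & ~X86_CR4_PGE);  toggling PGE off flushes global entries
 *      write_cr4(cr4);                 then restore the original CR4
 *
 * Reordering any of these accesses would silently break the flush.
 */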

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
        unsigned long cr8;
        asm volatile("movq %%cr8,%0" : "=r" (cr8));
        return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0() (native_read_cr0())
#define write_cr0(x) (native_write_cr0(x))
#define read_cr2() (native_read_cr2())
#define write_cr2(x) (native_write_cr2(x))
#define read_cr3() (native_read_cr3())
#define write_cr3(x) (native_write_cr3(x))
#define read_cr4() (native_read_cr4())
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x) (native_write_cr4(x))
#define wbinvd() (native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8() (native_read_cr8())
#define write_cr8(x) (native_write_cr8(x))
#define load_gs_index native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts() (native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
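
/*
 * Illustrative example (not part of the original header) of why a barrier is
 * needed even on UP when talking to devices: a driver must make a DMA
 * descriptor visible in memory before ringing the device's doorbell
 * (writel() is the normal MMIO write helper; the other names are assumptions
 * used only for illustration):
 *
 *      desc->addr  = dma_addr;
 *      desc->flags = DESC_OWNED_BY_HW;
 *      wmb();                          descriptor contents reach memory first
 *      writel(tail, ring->doorbell);   only then tell the device to fetch it
 */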

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends() do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
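
/*
 * Illustrative example (not part of the original header): the classic
 * publish/consume pairing these SMP variants are meant for. The writer
 * initializes the object before publishing the pointer; the reader pairs the
 * dependent load with smp_read_barrier_depends() (a no-op on x86, but needed
 * for portability to CPUs such as Alpha):
 *
 *      CPU 0 (writer)                  CPU 1 (reader)
 *
 *      obj->val = 42;
 *      smp_wmb();
 *      global_ptr = obj;               p = global_ptr;
 *                                      smp_read_barrier_depends();
 *                                      val = p->val;
 */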

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there were one.)
 */
static inline void rdtsc_barrier(void)
{
        alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
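
/*
 * Illustrative usage (not part of the original header): bracketing a TSC read
 * so it is neither hoisted above nor sunk below the code being timed
 * (get_cycles() is the usual RDTSC wrapper from <asm/tsc.h>):
 *
 *      rdtsc_barrier();
 *      t0 = get_cycles();
 *      rdtsc_barrier();
 */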

#endif /* _ASM_X86_SYSTEM_H */