system.h

#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <linux/kernel.h>

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__
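/*
 * _set_base()/_set_limit() patch the base and limit fields of a
 * segment descriptor in place.  Descriptor byte layout touched here:
 * limit[15:0] in bytes 0-1, base[15:0] in bytes 2-3, base[23:16] in
 * byte 4, limit[19:16] in the low nibble of byte 6 (the high nibble
 * holds the flags and is preserved), base[31:24] in byte 7.  The
 * set_limit() wrapper below is passed the segment size; the stored
 * limit is the last valid offset, hence the -1.
 */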
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)
#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)
#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))

extern void load_gs_index(unsigned);
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %k0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"movl %k1, %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		_ASM_ALIGN "\n\t" \
		_ASM_PTR " 1b,3b\n" \
		".previous" \
		: :"r" (value), "r" (0))
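/*
 * "%k0" selects the 32-bit form of the register holding 'value'.  If
 * the selector is bad, the faulting "movl" at 1: is redirected via the
 * __ex_table entry to the fixup code at 3:, which loads the null
 * selector (operand %1 is the constant 0) and resumes at 2:.
 *
 * Typical use: loadsegment(fs, 0);
 */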
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
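/*
 * Return the size in bytes of the segment described by the given
 * selector: "lsl" loads the segment limit (the last valid byte
 * offset), hence the +1.
 */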
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
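/*
 * Clear the task-switched (TS) flag in %cr0; the counterpart of
 * stts() below.  Typically used for lazy FPU state switching.
 */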
static inline void native_clts(void)
{
	asm volatile ("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
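/*
 * Each control-register read below lists __force_order as an output
 * ("=m") and each write lists it as an input ("m"), so the compiler
 * sees a dependency between the otherwise independent asm statements.
 */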
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
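/*
 * Like native_read_cr4(), but safe on 32-bit CPUs that have no %cr4:
 * if the "mov" faults, the __ex_table entry below skips it and the
 * preloaded 0 (the "0" (0) input constraint) is returned instead.
 */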
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0 \n"
		     "2: \n"
		     ".section __ex_table,\"a\" \n"
		     ".long 1b,2b \n"
		     ".previous \n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
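/* Write back and invalidate all caches. */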
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0() (native_read_cr0())
#define write_cr0(x) (native_write_cr0(x))
#define read_cr2() (native_read_cr2())
#define write_cr2(x) (native_write_cr2(x))
#define read_cr3() (native_read_cr3())
#define write_cr3(x) (native_write_cr3(x))
#define read_cr4() (native_read_cr4())
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x) (native_write_cr4(x))
#define wbinvd() (native_wbinvd())

/* Clear the 'TS' bit */
#define clts() (native_clts())

#endif /* CONFIG_PARAVIRT */
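/* Set the 'TS' bit: bit 3 of %cr0, hence the constant 8 */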
#define stts() write_cr0(8 | read_cr0())

#endif /* __KERNEL__ */
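/* Flush the cache line containing the given address from all cache levels. */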
static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);

#endif