/*
 * processor.h
 * (Extraction residue removed: the original capture carried a "9.1 KB"
 * size listing and a concatenated run of display line numbers here.)
 */
  1. #ifndef __ASM_X86_PROCESSOR_H
  2. #define __ASM_X86_PROCESSOR_H
  3. #include <asm/processor-flags.h>
  4. /* Forward declaration, a strange C thing */
  5. struct task_struct;
  6. struct mm_struct;
  7. #include <asm/page.h>
  8. #include <asm/percpu.h>
  9. #include <asm/system.h>
  10. /*
  11. * Default implementation of macro that returns current
  12. * instruction pointer ("program counter").
  13. */
  14. static inline void *current_text_addr(void)
  15. {
  16. void *pc;
  17. asm volatile("mov $1f,%0\n1:":"=r" (pc));
  18. return pc;
  19. }
  20. static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
  21. unsigned int *ecx, unsigned int *edx)
  22. {
  23. /* ecx is often an input as well as an output. */
  24. __asm__("cpuid"
  25. : "=a" (*eax),
  26. "=b" (*ebx),
  27. "=c" (*ecx),
  28. "=d" (*edx)
  29. : "0" (*eax), "2" (*ecx));
  30. }
  31. static inline void load_cr3(pgd_t *pgdir)
  32. {
  33. write_cr3(__pa(pgdir));
  34. }
  35. #ifdef CONFIG_X86_32
  36. /* This is the TSS defined by the hardware. */
  37. struct x86_hw_tss {
  38. unsigned short back_link, __blh;
  39. unsigned long sp0;
  40. unsigned short ss0, __ss0h;
  41. unsigned long sp1;
  42. unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
  43. unsigned long sp2;
  44. unsigned short ss2, __ss2h;
  45. unsigned long __cr3;
  46. unsigned long ip;
  47. unsigned long flags;
  48. unsigned long ax, cx, dx, bx;
  49. unsigned long sp, bp, si, di;
  50. unsigned short es, __esh;
  51. unsigned short cs, __csh;
  52. unsigned short ss, __ssh;
  53. unsigned short ds, __dsh;
  54. unsigned short fs, __fsh;
  55. unsigned short gs, __gsh;
  56. unsigned short ldt, __ldth;
  57. unsigned short trace, io_bitmap_base;
  58. } __attribute__((packed));
  59. #else
  60. struct x86_hw_tss {
  61. u32 reserved1;
  62. u64 sp0;
  63. u64 sp1;
  64. u64 sp2;
  65. u64 reserved2;
  66. u64 ist[7];
  67. u32 reserved3;
  68. u32 reserved4;
  69. u16 reserved5;
  70. u16 io_bitmap_base;
  71. } __attribute__((packed)) ____cacheline_aligned;
  72. #endif
  73. /*
  74. * Size of io_bitmap.
  75. */
  76. #define IO_BITMAP_BITS 65536
  77. #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
  78. #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
  79. #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
  80. #define INVALID_IO_BITMAP_OFFSET 0x8000
  81. #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
  82. struct tss_struct {
  83. struct x86_hw_tss x86_tss;
  84. /*
  85. * The extra 1 is there because the CPU will access an
  86. * additional byte beyond the end of the IO permission
  87. * bitmap. The extra byte must be all 1 bits, and must
  88. * be within the limit.
  89. */
  90. unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
  91. /*
  92. * Cache the current maximum and the last task that used the bitmap:
  93. */
  94. unsigned long io_bitmap_max;
  95. struct thread_struct *io_bitmap_owner;
  96. /*
  97. * pads the TSS to be cacheline-aligned (size is 0x100)
  98. */
  99. unsigned long __cacheline_filler[35];
  100. /*
  101. * .. and then another 0x100 bytes for emergency kernel stack
  102. */
  103. unsigned long stack[64];
  104. } __attribute__((packed));
  105. DECLARE_PER_CPU(struct tss_struct, init_tss);
  106. #ifdef CONFIG_X86_32
  107. # include "processor_32.h"
  108. #else
  109. # include "processor_64.h"
  110. #endif
  111. extern void print_cpu_info(struct cpuinfo_x86 *);
  112. extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
  113. extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
  114. extern unsigned short num_cache_leaves;
  115. static inline unsigned long native_get_debugreg(int regno)
  116. {
  117. unsigned long val = 0; /* Damn you, gcc! */
  118. switch (regno) {
  119. case 0:
  120. asm("mov %%db0, %0" :"=r" (val)); break;
  121. case 1:
  122. asm("mov %%db1, %0" :"=r" (val)); break;
  123. case 2:
  124. asm("mov %%db2, %0" :"=r" (val)); break;
  125. case 3:
  126. asm("mov %%db3, %0" :"=r" (val)); break;
  127. case 6:
  128. asm("mov %%db6, %0" :"=r" (val)); break;
  129. case 7:
  130. asm("mov %%db7, %0" :"=r" (val)); break;
  131. default:
  132. BUG();
  133. }
  134. return val;
  135. }
  136. static inline void native_set_debugreg(int regno, unsigned long value)
  137. {
  138. switch (regno) {
  139. case 0:
  140. asm("mov %0,%%db0" : /* no output */ :"r" (value));
  141. break;
  142. case 1:
  143. asm("mov %0,%%db1" : /* no output */ :"r" (value));
  144. break;
  145. case 2:
  146. asm("mov %0,%%db2" : /* no output */ :"r" (value));
  147. break;
  148. case 3:
  149. asm("mov %0,%%db3" : /* no output */ :"r" (value));
  150. break;
  151. case 6:
  152. asm("mov %0,%%db6" : /* no output */ :"r" (value));
  153. break;
  154. case 7:
  155. asm("mov %0,%%db7" : /* no output */ :"r" (value));
  156. break;
  157. default:
  158. BUG();
  159. }
  160. }
  161. /*
  162. * Set IOPL bits in EFLAGS from given mask
  163. */
  164. static inline void native_set_iopl_mask(unsigned mask)
  165. {
  166. #ifdef CONFIG_X86_32
  167. unsigned int reg;
  168. __asm__ __volatile__ ("pushfl;"
  169. "popl %0;"
  170. "andl %1, %0;"
  171. "orl %2, %0;"
  172. "pushl %0;"
  173. "popfl"
  174. : "=&r" (reg)
  175. : "i" (~X86_EFLAGS_IOPL), "r" (mask));
  176. #endif
  177. }
  178. #ifndef CONFIG_PARAVIRT
  179. #define __cpuid native_cpuid
  180. #define paravirt_enabled() 0
  181. /*
  182. * These special macros can be used to get or set a debugging register
  183. */
  184. #define get_debugreg(var, register) \
  185. (var) = native_get_debugreg(register)
  186. #define set_debugreg(value, register) \
  187. native_set_debugreg(register, value)
  188. #define set_iopl_mask native_set_iopl_mask
  189. #endif /* CONFIG_PARAVIRT */
  190. /*
  191. * Save the cr4 feature set we're using (ie
  192. * Pentium 4MB enable and PPro Global page
  193. * enable), so that any CPU's that boot up
  194. * after us can get the correct flags.
  195. */
  196. extern unsigned long mmu_cr4_features;
  197. static inline void set_in_cr4(unsigned long mask)
  198. {
  199. unsigned cr4;
  200. mmu_cr4_features |= mask;
  201. cr4 = read_cr4();
  202. cr4 |= mask;
  203. write_cr4(cr4);
  204. }
  205. static inline void clear_in_cr4(unsigned long mask)
  206. {
  207. unsigned cr4;
  208. mmu_cr4_features &= ~mask;
  209. cr4 = read_cr4();
  210. cr4 &= ~mask;
  211. write_cr4(cr4);
  212. }
  213. struct microcode_header {
  214. unsigned int hdrver;
  215. unsigned int rev;
  216. unsigned int date;
  217. unsigned int sig;
  218. unsigned int cksum;
  219. unsigned int ldrver;
  220. unsigned int pf;
  221. unsigned int datasize;
  222. unsigned int totalsize;
  223. unsigned int reserved[3];
  224. };
  225. struct microcode {
  226. struct microcode_header hdr;
  227. unsigned int bits[0];
  228. };
  229. typedef struct microcode microcode_t;
  230. typedef struct microcode_header microcode_header_t;
  231. /* microcode format is extended from prescott processors */
  232. struct extended_signature {
  233. unsigned int sig;
  234. unsigned int pf;
  235. unsigned int cksum;
  236. };
  237. struct extended_sigtable {
  238. unsigned int count;
  239. unsigned int cksum;
  240. unsigned int reserved[3];
  241. struct extended_signature sigs[0];
  242. };
  243. /*
  244. * create a kernel thread without removing it from tasklists
  245. */
  246. extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
  247. /* Free all resources held by a thread. */
  248. extern void release_thread(struct task_struct *);
  249. /* Prepare to copy thread state - unlazy all lazy status */
  250. extern void prepare_to_copy(struct task_struct *tsk);
  251. unsigned long get_wchan(struct task_struct *p);
  252. /*
  253. * Generic CPUID function
  254. * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
  255. * resulting in stale register contents being returned.
  256. */
  257. static inline void cpuid(unsigned int op,
  258. unsigned int *eax, unsigned int *ebx,
  259. unsigned int *ecx, unsigned int *edx)
  260. {
  261. *eax = op;
  262. *ecx = 0;
  263. __cpuid(eax, ebx, ecx, edx);
  264. }
  265. /* Some CPUID calls want 'count' to be placed in ecx */
  266. static inline void cpuid_count(unsigned int op, int count,
  267. unsigned int *eax, unsigned int *ebx,
  268. unsigned int *ecx, unsigned int *edx)
  269. {
  270. *eax = op;
  271. *ecx = count;
  272. __cpuid(eax, ebx, ecx, edx);
  273. }
  274. /*
  275. * CPUID functions returning a single datum
  276. */
  277. static inline unsigned int cpuid_eax(unsigned int op)
  278. {
  279. unsigned int eax, ebx, ecx, edx;
  280. cpuid(op, &eax, &ebx, &ecx, &edx);
  281. return eax;
  282. }
  283. static inline unsigned int cpuid_ebx(unsigned int op)
  284. {
  285. unsigned int eax, ebx, ecx, edx;
  286. cpuid(op, &eax, &ebx, &ecx, &edx);
  287. return ebx;
  288. }
  289. static inline unsigned int cpuid_ecx(unsigned int op)
  290. {
  291. unsigned int eax, ebx, ecx, edx;
  292. cpuid(op, &eax, &ebx, &ecx, &edx);
  293. return ecx;
  294. }
  295. static inline unsigned int cpuid_edx(unsigned int op)
  296. {
  297. unsigned int eax, ebx, ecx, edx;
  298. cpuid(op, &eax, &ebx, &ecx, &edx);
  299. return edx;
  300. }
  301. /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
  302. static inline void rep_nop(void)
  303. {
  304. __asm__ __volatile__("rep;nop": : :"memory");
  305. }
  306. /* Stop speculative execution */
  307. static inline void sync_core(void)
  308. {
  309. int tmp;
  310. asm volatile("cpuid" : "=a" (tmp) : "0" (1)
  311. : "ebx", "ecx", "edx", "memory");
  312. }
  313. #define cpu_relax() rep_nop()
  314. static inline void __monitor(const void *eax, unsigned long ecx,
  315. unsigned long edx)
  316. {
  317. /* "monitor %eax,%ecx,%edx;" */
  318. asm volatile(
  319. ".byte 0x0f,0x01,0xc8;"
  320. : :"a" (eax), "c" (ecx), "d"(edx));
  321. }
  322. static inline void __mwait(unsigned long eax, unsigned long ecx)
  323. {
  324. /* "mwait %eax,%ecx;" */
  325. asm volatile(
  326. ".byte 0x0f,0x01,0xc9;"
  327. : :"a" (eax), "c" (ecx));
  328. }
  329. static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
  330. {
  331. /* "mwait %eax,%ecx;" */
  332. asm volatile(
  333. "sti; .byte 0x0f,0x01,0xc9;"
  334. : :"a" (eax), "c" (ecx));
  335. }
  336. extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
  337. extern int force_mwait;
  338. extern void select_idle_routine(const struct cpuinfo_x86 *c);
  339. extern unsigned long boot_option_idle_override;
  340. /* Boot loader type from the setup header */
  341. extern int bootloader_type;
  342. #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
  343. #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
  344. #define ARCH_HAS_PREFETCHW
  345. #define ARCH_HAS_SPINLOCK_PREFETCH
  346. #define spin_lock_prefetch(x) prefetchw(x)
  347. /* This decides where the kernel will search for a free chunk of vm
  348. * space during mmap's.
  349. */
  350. #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
  351. #define KSTK_EIP(task) (task_pt_regs(task)->ip)
  352. #endif