#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;

        asm volatile("mov $1f, %0; 1:" : "=r" (pc));

        return pc;
}
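
/*
 * Illustrative use (a sketch, not part of the original header): early
 * debug code can log roughly where the kernel is executing, e.g.
 *
 *	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
 */
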
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN             16
# define ARCH_MIN_MMSTRUCT_ALIGN        0
#endif
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
        __u8                    x86;            /* CPU family */
        __u8                    x86_vendor;     /* CPU vendor */
        __u8                    x86_model;
        __u8                    x86_mask;
#ifdef CONFIG_X86_32
        char                    wp_works_ok;    /* It doesn't on 386's */

        /* Problems on some 486Dx4's and old 386's: */
        char                    hlt_works_ok;
        char                    hard_math;
        char                    rfu;
        char                    fdiv_bug;
        char                    f00f_bug;
        char                    coma_bug;
        char                    pad0;
#else
        /* Number of 4K pages in DTLB/ITLB combined: */
        int                     x86_tlbsize;
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
#endif
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int                     cpuid_level;
        __u32                   x86_capability[NCAPINTS];
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
        /* in KB - valid for CPUs which support this call: */
        int                     x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        int                     x86_power;
        unsigned long           loops_per_jiffy;
#ifdef CONFIG_SMP
        /* CPUs sharing the last level cache: */
        cpumask_t               llc_shared_map;
#endif
        /* cpuid returned max cores value: */
        u16                     x86_max_cores;
        u16                     apicid;
        u16                     initial_apicid;
        u16                     x86_clflush_size;
#ifdef CONFIG_SMP
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
        /* Physical processor id: */
        u16                     phys_proc_id;
        /* Core id: */
        u16                     cpu_core_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
#endif
        unsigned int            x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9

#define X86_VENDOR_UNKNOWN      0xff

#define X86_HYPER_VENDOR_NONE   0
#define X86_HYPER_VENDOR_VMWARE 1
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86       boot_cpu_data;
extern struct cpuinfo_x86       new_cpu_data;

extern struct tss_struct        doublefault_tss;
extern __u32                    cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#define current_cpu_data        __get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)           boot_cpu_data
#define current_cpu_data        boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;
static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
        return cpu_data(cpu).hlt_works_ok;
#else
        return 1;
#endif
}

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx));
}
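
/*
 * Example (a sketch, not part of the original header): because %ecx is
 * an input, sub-leaf queries work by pre-loading it. For CPUID leaf 4
 * (deterministic cache parameters), cache index 1 would be:
 *
 *	unsigned int eax = 4, ebx, ecx = 1, edx;
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 */
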
static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short          back_link, __blh;
        unsigned long           sp0;
        unsigned short          ss0, __ss0h;
        unsigned long           sp1;
        /* ss1 caches MSR_IA32_SYSENTER_CS: */
        unsigned short          ss1, __ss1h;
        unsigned long           sp2;
        unsigned short          ss2, __ss2h;
        unsigned long           __cr3;
        unsigned long           ip;
        unsigned long           flags;
        unsigned long           ax;
        unsigned long           cx;
        unsigned long           dx;
        unsigned long           bx;
        unsigned long           sp;
        unsigned long           bp;
        unsigned long           si;
        unsigned long           di;
        unsigned short          es, __esh;
        unsigned short          cs, __csh;
        unsigned short          ss, __ssh;
        unsigned short          ds, __dsh;
        unsigned short          fs, __fsh;
        unsigned short          gs, __gsh;
        unsigned short          ldt, __ldth;
        unsigned short          trace;
        unsigned short          io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
        u64                     sp1;
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
        u32                     reserved3;
        u32                     reserved4;
        u16                     reserved5;
        u16                     io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif
/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY   0x9000

struct tss_struct {
        /*
         * The hardware state:
         */
        struct x86_hw_tss       x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * Cache the current maximum and the last task that used the bitmap:
         */
        unsigned long           io_bitmap_max;
        struct thread_struct    *io_bitmap_owner;

        /*
         * .. and then another 0x100 bytes for the emergency kernel stack:
         */
        unsigned long           stack[64];
} ____cacheline_aligned;
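
/*
 * Sketch of the lookup the hardware performs (not part of the original
 * header): an I/O access at CPL > IOPL is allowed only when bit
 * (port % 8) of byte (port / 8) in io_bitmap is clear; in C terms:
 *
 *	int denied = (((u8 *)tss->io_bitmap)[port / 8] >> (port % 8)) & 1;
 *
 * The "+ 1" slot above keeps the extra byte the CPU reads beyond the
 * last valid port inside the segment limit, filled with 1 bits.
 */
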
DECLARE_PER_CPU(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
        unsigned long           ist[7];
};
#define MXCSR_DEFAULT           0x1f80

struct i387_fsave_struct {
        u32                     cwd;    /* FPU Control Word */
        u32                     swd;    /* FPU Status Word */
        u32                     twd;    /* FPU Tag Word */
        u32                     fip;    /* FPU IP Offset */
        u32                     fcs;    /* FPU IP Selector */
        u32                     foo;    /* FPU Operand Pointer Offset */
        u32                     fos;    /* FPU Operand Pointer Selector */

        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32                     st_space[20];

        /* Software status information [not touched by FSAVE]: */
        u32                     status;
};

struct i387_fxsave_struct {
        u16                     cwd; /* Control Word */
        u16                     swd; /* Status Word */
        u16                     twd; /* Tag Word */
        u16                     fop; /* Last Instruction Opcode */
        union {
                struct {
                        u64     rip; /* Instruction Pointer */
                        u64     rdp; /* Data Pointer */
                };
                struct {
                        u32     fip; /* FPU IP Offset */
                        u32     fcs; /* FPU IP Selector */
                        u32     foo; /* FPU Operand Offset */
                        u32     fos; /* FPU Operand Selector */
                };
        };
        u32                     mxcsr;          /* MXCSR Register State */
        u32                     mxcsr_mask;     /* MXCSR Mask */

        /* 8*16 bytes for each FP-reg = 128 bytes: */
        u32                     st_space[32];

        /* 16*16 bytes for each XMM-reg = 256 bytes: */
        u32                     xmm_space[64];

        u32                     padding[12];

        union {
                u32             padding1[12];
                u32             sw_reserved[12];
        };

} __attribute__((aligned(16)));

struct i387_soft_struct {
        u32                     cwd;
        u32                     swd;
        u32                     twd;
        u32                     fip;
        u32                     fcs;
        u32                     foo;
        u32                     fos;
        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32                     st_space[20];
        u8                      ftop;
        u8                      changed;
        u8                      lookahead;
        u8                      no_update;
        u8                      rm;
        u8                      alimit;
        struct math_emu_info    *info;
        u32                     entry_eip;
};

struct xsave_hdr_struct {
        u64 xstate_bv;
        u64 reserved1[2];
        u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
        struct i387_fxsave_struct i387;
        struct xsave_hdr_struct xsave_hdr;
        /* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
        struct i387_fsave_struct        fsave;
        struct i387_fxsave_struct       fxsave;
        struct i387_soft_struct         soft;
        struct xsave_struct             xsave;
};
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];
        /*
         * GCC hardcodes the stack canary as %gs:40. Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        struct {
                char gs_base[40];
                unsigned long stack_canary;
        };
};
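
/*
 * Sketch (an assumption about compiler output, not from this header):
 * with -fstack-protector, GCC guards a frame roughly like
 *
 *	movq %gs:40, %rax	(prologue: load the canary)
 *	...
 *	xorq %gs:40, %rax	(epilogue: mismatch -> __stack_chk_fail)
 *
 * which is why stack_canary must sit exactly 40 bytes into the object
 * that %gs points at.
 */
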
DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);

static inline void load_gs_base(int cpu)
{
        /* Memory clobbers used to order pda/percpu accesses */
        mb();
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
        mb();
}
#endif
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long           sp0;
        unsigned long           sp;
#ifdef CONFIG_X86_32
        unsigned long           sysenter_cs;
#else
        unsigned long           usersp; /* Copy from PDA */
        unsigned short          es;
        unsigned short          ds;
        unsigned short          fsindex;
        unsigned short          gsindex;
#endif
        unsigned long           ip;
        unsigned long           fs;
        unsigned long           gs;
        /* Hardware debugging registers: */
        unsigned long           debugreg0;
        unsigned long           debugreg1;
        unsigned long           debugreg2;
        unsigned long           debugreg3;
        unsigned long           debugreg6;
        unsigned long           debugreg7;
        /* Fault info: */
        unsigned long           cr2;
        unsigned long           trap_no;
        unsigned long           error_code;
        /* floating point and extended processor state */
        union thread_xstate     *xstate;
#ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long           screen_bitmap;
        unsigned long           v86flags;
        unsigned long           v86mask;
        unsigned long           saved_sp0;
        unsigned int            saved_fs;
        unsigned int            saved_gs;
#endif
        /* IO permissions: */
        unsigned long           *io_bitmap_ptr;
        unsigned long           iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned                io_bitmap_max;
        /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
        unsigned long           debugctlmsr;
#ifdef CONFIG_X86_DS
        /* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
        struct ds_context       *ds_ctx;
#endif /* CONFIG_X86_DS */
#ifdef CONFIG_X86_PTRACE_BTS
        /* the signal to send on a bts buffer overflow */
        unsigned int            bts_ovfl_signal;
#endif /* CONFIG_X86_PTRACE_BTS */
};
static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0; /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("mov %%db0, %0" : "=r" (val));
                break;
        case 1:
                asm("mov %%db1, %0" : "=r" (val));
                break;
        case 2:
                asm("mov %%db2, %0" : "=r" (val));
                break;
        case 3:
                asm("mov %%db3, %0" : "=r" (val));
                break;
        case 6:
                asm("mov %%db6, %0" : "=r" (val));
                break;
        case 7:
                asm("mov %%db7, %0" : "=r" (val));
                break;
        default:
                BUG();
        }

        return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("mov %0, %%db0" :: "r" (value));
                break;
        case 1:
                asm("mov %0, %%db1" :: "r" (value));
                break;
        case 2:
                asm("mov %0, %%db2" :: "r" (value));
                break;
        case 3:
                asm("mov %0, %%db3" :: "r" (value));
                break;
        case 6:
                asm("mov %0, %%db6" :: "r" (value));
                break;
        case 7:
                asm("mov %0, %%db7" :: "r" (value));
                break;
        default:
                BUG();
        }
}
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;

        asm volatile ("pushfl;"
                      "popl %0;"
                      "andl %1, %0;"
                      "orl %2, %0;"
                      "pushl %0;"
                      "popfl"
                      : "=&r" (reg)
                      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
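
/*
 * Example (illustrative, not from this header): the iopl() syscall path
 * ends up here; granting a task ring-3 I/O access (level 3) amounts to
 *
 *	set_iopl_mask(3 << 12);		(the IOPL field is EFLAGS bits 12-13)
 */
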
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid                 native_cpuid
#define paravirt_enabled()      0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)                             \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register)                           \
        native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
typedef struct {
        unsigned long           seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}
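
/*
 * Illustrative sketch (not part of the original header): counting the
 * deterministic cache parameter leaves on an Intel CPU. Leaf 4 takes a
 * cache index in %ecx and reports cache type 0 in EAX[4:0] past the
 * last cache, which is how code like init_intel_cacheinfo() can size
 * its enumeration loop.
 */
static inline unsigned int example_count_cache_leaves(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int i = 0;

        do {
                cpuid_count(4, i++, &eax, &ebx, &ecx, &edx);
        } while (eax & 0x1f);   /* EAX[4:0] == 0: no more caches */

        return i - 1;
}
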
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}
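
/*
 * Illustrative sketch (not part of the original header): the processor
 * brand string lives in CPUID leaves 0x80000002-0x80000004, but only
 * if leaf 0x80000000 reports at least 0x80000004 in EAX:
 */
static inline int example_has_brand_string(void)
{
        return cpuid_eax(0x80000000) >= 0x80000004;
}
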
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
        rep_nop();
}
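
/*
 * Illustrative sketch (not part of the original header): a polite
 * busy-wait puts the PAUSE hint in every iteration, easing the sibling
 * hyper-thread and the memory pipeline while spinning:
 */
static inline void example_spin_until_set(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}
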
/* Stop speculative execution: */
static inline void sync_core(void)
{
        int tmp;

        asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax, %ecx, %edx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc8;"
                     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        trace_hardirqs_on();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}
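
/*
 * Illustrative sketch (not part of the original header): the canonical
 * MONITOR/MWAIT sequence arms the monitor on an address, re-checks the
 * wakeup condition to close the race window, and only then sleeps; a
 * store to the monitored cache line (or an interrupt) ends the MWAIT:
 */
static inline void example_mwait_on(unsigned long *addr)
{
        __monitor(addr, 0, 0);
        if (!*addr)
                __mwait(0, 0);
}
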
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long            boot_option_idle_override;
extern unsigned long            idle_halt;
extern unsigned long            idle_nomwait;
/*
 * On systems with caches, the caches must be flushed as the absolute
 * last instruction before going into a suspended halt. Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions. wbinvd itself is serializing
 * so the processor will not reorder.
 *
 * Systems without cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
        mb();
        /* check for clflush to determine if wbinvd is legal */
        if (cpu_has_clflush)
                asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
        else
                while (1)
                        halt();
}
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr          early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);
static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
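
/*
 * Example (a sketch; DEBUGCTLMSR_BTF comes from asm/msr-index.h):
 * enabling single-step-on-branches is a read-modify-write of this MSR:
 *
 *	update_debugctlmsr(get_debugctlmsr() | DEBUGCTLMSR_BTF);
 */
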
/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int             machine_id;
extern unsigned int             machine_submodel_id;
extern unsigned int             BIOS_revision;

/* Boot loader type from the setup header: */
extern int                      bootloader_type;

extern char                     ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 (%1)"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}
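
/*
 * Example (illustrative, not from this header): the usual pattern
 * prefetches the next node while the current one is still being
 * processed, hiding the miss latency of a pointer chase:
 *
 *	for (p = list; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 */
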
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE               PAGE_OFFSET
#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           STACK_TOP

#define INIT_THREAD  {                                                    \
        .sp0                    = sizeof(init_stack) + (long)&init_stack, \
        .vm86_info              = NULL,                                   \
        .sysenter_cs            = __KERNEL_CS,                            \
        .io_bitmap_ptr          = NULL,                                   \
        .fs                     = __KERNEL_PERCPU,                        \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                                       \
        .x86_tss = {                                                      \
                .sp0            = sizeof(init_stack) + (long)&init_stack, \
                .ss0            = __KERNEL_DS,                            \
                .ss1            = __KERNEL_CS,                            \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
         },                                                               \
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },       \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS       (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
        unsigned long *__ptr = (unsigned long *)(info);                \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                    \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
        struct pt_regs *__regs__;                                      \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1;                                                  \
})

#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64     ((1UL << 47) - PAGE_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
                                        0xc0000000 : 0xFFFFe000)

#define TASK_SIZE               (test_thread_flag(TIF_IA32) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_IA32)) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE64)

#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           TASK_SIZE64

#define INIT_THREAD  { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
        .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)           -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                                               unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
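
/*
 * Example (illustrative, userspace side): these hooks back the
 * PR_GET_TSC/PR_SET_TSC prctl() interface, e.g.
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);	(PR_TSC_ENABLE or PR_TSC_SIGSEGV)
 */
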
#endif /* _ASM_X86_PROCESSOR_H */