processor.h

#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* migration helper, for KVM - will be removed in 2.6.25: */
#define Xgt_desc_struct desc_ptr

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;

        asm volatile("mov $1f, %0; 1:":"=r" (pc));

        return pc;
}
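/*
 * Illustrative sketch (not part of the original header): the "1:" label trick
 * above simply yields the address of the next instruction, so a caller can
 * log roughly where it is executing, e.g.:
 *
 *      void *pc = current_text_addr();
 *      printk(KERN_DEBUG "running near %p\n", pc);
 */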
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN             16
# define ARCH_MIN_MMSTRUCT_ALIGN        0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
        __u8 x86;               /* CPU family */
        __u8 x86_vendor;        /* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
#ifdef CONFIG_X86_32
        char wp_works_ok;       /* It doesn't on 386's */
        /* Problems on some 486Dx4's and old 386's: */
        char hlt_works_ok;
        char hard_math;
        char rfu;
        char fdiv_bug;
        char f00f_bug;
        char coma_bug;
        char pad0;
#else
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
        int x86_tlbsize;
        __u8 x86_virt_bits;
        __u8 x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8 x86_coreid_bits;
        /* Max extended CPUID function supported: */
        __u32 extended_cpuid_level;
#endif
        /* Maximum supported CPUID level, -1=no CPUID: */
        int cpuid_level;
        __u32 x86_capability[NCAPINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        /* in KB - valid for CPUS which support this call: */
        int x86_cache_size;
        int x86_cache_alignment;        /* In bytes */
        int x86_power;
        unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
        /* cpus sharing the last level cache: */
        cpumask_t llc_shared_map;
#endif
        /* cpuid returned max cores value: */
        u16 x86_max_cores;
        u16 apicid;
        u16 initial_apicid;
        u16 x86_clflush_size;
#ifdef CONFIG_SMP
        /* number of cores as seen by the OS: */
        u16 booted_cores;
        /* Physical processor id: */
        u16 phys_proc_id;
        /* Core id: */
        u16 cpu_core_id;
        /* Index into per_cpu list: */
        u16 cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9

#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern struct tss_struct doublefault_tss;
extern __u32 cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#define current_cpu_data        cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)           boot_cpu_data
#define current_cpu_data        boot_cpu_data
#endif

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
        return cpu_data(cpu).hlt_works_ok;
#else
        return 1;
#endif
}

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx));
}
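/*
 * Illustrative sketch (not part of the original header): callers preload the
 * requested leaf (and sub-leaf) into the output variables, and native_cpuid()
 * then overwrites all four with the CPUID results. For leaf 0, eax afterwards
 * holds the maximum standard level and ebx/ecx/edx the vendor string pieces:
 *
 *      unsigned int eax = 0, ebx, ecx = 0, edx;
 *
 *      native_cpuid(&eax, &ebx, &ecx, &edx);
 */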
static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short back_link, __blh;
        unsigned long sp0;
        unsigned short ss0, __ss0h;
        unsigned long sp1;
        /* ss1 caches MSR_IA32_SYSENTER_CS: */
        unsigned short ss1, __ss1h;
        unsigned long sp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long ip;
        unsigned long flags;
        unsigned long ax;
        unsigned long cx;
        unsigned long dx;
        unsigned long bx;
        unsigned long sp;
        unsigned long bp;
        unsigned long si;
        unsigned long di;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace;
        unsigned short io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
        u32 reserved1;
        u64 sp0;
        u64 sp1;
        u64 sp2;
        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY   0x9000

struct tss_struct {
        /*
         * The hardware state:
         */
        struct x86_hw_tss x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * Cache the current maximum and the last task that used the bitmap:
         */
        unsigned long io_bitmap_max;
        struct thread_struct *io_bitmap_owner;

        /*
         * Pad the TSS to be cacheline-aligned (size is 0x100):
         */
        unsigned long __cacheline_filler[35];
        /*
         * .. and then another 0x100 bytes for the emergency kernel stack:
         */
        unsigned long stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
        unsigned long ist[7];
};

#define MXCSR_DEFAULT   0x1f80

struct i387_fsave_struct {
        u32 cwd;        /* FPU Control Word */
        u32 swd;        /* FPU Status Word */
        u32 twd;        /* FPU Tag Word */
        u32 fip;        /* FPU IP Offset */
        u32 fcs;        /* FPU IP Selector */
        u32 foo;        /* FPU Operand Pointer Offset */
        u32 fos;        /* FPU Operand Pointer Selector */

        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32 st_space[20];

        /* Software status information [not touched by FSAVE ]: */
        u32 status;
};

struct i387_fxsave_struct {
        u16 cwd;        /* Control Word */
        u16 swd;        /* Status Word */
        u16 twd;        /* Tag Word */
        u16 fop;        /* Last Instruction Opcode */
        union {
                struct {
                        u64 rip;        /* Instruction Pointer */
                        u64 rdp;        /* Data Pointer */
                };
                struct {
                        u32 fip;        /* FPU IP Offset */
                        u32 fcs;        /* FPU IP Selector */
                        u32 foo;        /* FPU Operand Offset */
                        u32 fos;        /* FPU Operand Selector */
                };
        };
        u32 mxcsr;      /* MXCSR Register State */
        u32 mxcsr_mask; /* MXCSR Mask */

        /* 8*16 bytes for each FP-reg = 128 bytes: */
        u32 st_space[32];

        /* 16*16 bytes for each XMM-reg = 256 bytes: */
        u32 xmm_space[64];

        u32 padding[24];
} __attribute__((aligned(16)));

struct i387_soft_struct {
        u32 cwd;
        u32 swd;
        u32 twd;
        u32 fip;
        u32 fcs;
        u32 foo;
        u32 fos;
        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32 st_space[20];
        u8 ftop;
        u8 changed;
        u8 lookahead;
        u8 no_update;
        u8 rm;
        u8 alimit;
        struct info *info;
        u32 entry_eip;
};

union thread_xstate {
        struct i387_fsave_struct fsave;
        struct i387_fxsave_struct fxsave;
        struct i387_soft_struct soft;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long sp0;
        unsigned long sp;
#ifdef CONFIG_X86_32
        unsigned long sysenter_cs;
#else
        unsigned long usersp;   /* Copy from PDA */
        unsigned short es;
        unsigned short ds;
        unsigned short fsindex;
        unsigned short gsindex;
#endif
        unsigned long ip;
        unsigned long fs;
        unsigned long gs;
        /* Hardware debugging registers: */
        unsigned long debugreg0;
        unsigned long debugreg1;
        unsigned long debugreg2;
        unsigned long debugreg3;
        unsigned long debugreg6;
        unsigned long debugreg7;
        /* Fault info: */
        unsigned long cr2;
        unsigned long trap_no;
        unsigned long error_code;
        /* floating point and extended processor state */
        union thread_xstate *xstate;
#ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long screen_bitmap;
        unsigned long v86flags;
        unsigned long v86mask;
        unsigned long saved_sp0;
        unsigned int saved_fs;
        unsigned int saved_gs;
#endif
        /* IO permissions: */
        unsigned long *io_bitmap_ptr;
        unsigned long iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned io_bitmap_max;
        /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
        unsigned long debugctlmsr;
        /*
         * Debug Store - if not 0 points to a DS Save Area configuration;
         * goes into MSR_IA32_DS_AREA
         */
        unsigned long ds_area_msr;
};

static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0;  /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("mov %%db0, %0" :"=r" (val));
                break;
        case 1:
                asm("mov %%db1, %0" :"=r" (val));
                break;
        case 2:
                asm("mov %%db2, %0" :"=r" (val));
                break;
        case 3:
                asm("mov %%db3, %0" :"=r" (val));
                break;
        case 6:
                asm("mov %%db6, %0" :"=r" (val));
                break;
        case 7:
                asm("mov %%db7, %0" :"=r" (val));
                break;
        default:
                BUG();
        }
        return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("mov %0, %%db0" ::"r" (value));
                break;
        case 1:
                asm("mov %0, %%db1" ::"r" (value));
                break;
        case 2:
                asm("mov %0, %%db2" ::"r" (value));
                break;
        case 3:
                asm("mov %0, %%db3" ::"r" (value));
                break;
        case 6:
                asm("mov %0, %%db6" ::"r" (value));
                break;
        case 7:
                asm("mov %0, %%db7" ::"r" (value));
                break;
        default:
                BUG();
        }
}
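/*
 * Illustrative sketch (not part of the original header): the two accessors
 * above give raw read/write access to the hardware debug registers, e.g.
 * arming a breakpoint address in %db0 and then reading the status bits from
 * %db6 (watch_addr and dr6 are placeholder names used only for illustration):
 *
 *      native_set_debugreg(0, watch_addr);
 *      dr6 = native_get_debugreg(6);
 */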
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;

        asm volatile ("pushfl;"
                      "popl %0;"
                      "andl %1, %0;"
                      "orl %2, %0;"
                      "pushl %0;"
                      "popfl"
                      : "=&r" (reg)
                      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid                 native_cpuid
#define paravirt_enabled()      0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register) \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register) \
        native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

#define set_iopl_mask   native_set_iopl_mask
#define SWAPGS          swapgs
#endif /* CONFIG_PARAVIRT */
/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
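/*
 * Illustrative sketch (not part of the original header), assuming the
 * X86_CR4_PGE bit definition from <asm/processor-flags.h>: the pair above
 * both updates the live CR4 register and records the change in
 * mmu_cr4_features so that CPUs booting later pick up the same setting, e.g.:
 *
 *      set_in_cr4(X86_CR4_PGE);
 *      clear_in_cr4(X86_CR4_PGE);
 */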
struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}
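/*
 * Illustrative sketch (not part of the original header): the single-datum
 * helpers make simple queries compact, e.g. reading the maximum standard
 * CPUID level (leaf 0) and the family/model/stepping word (leaf 1):
 *
 *      unsigned int max_level = cpuid_eax(0);
 *      unsigned int fms       = cpuid_eax(1);
 */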
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
        rep_nop();
}

/* Stop speculative execution: */
static inline void sync_core(void)
{
        int tmp;

        asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax, %ecx, %edx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc8;"
                     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        trace_hardirqs_on();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}
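/*
 * Illustrative sketch (not part of the original header): a minimal
 * MONITOR/MWAIT wait on a flag ("flag" stands for any memory location being
 * watched). The address is armed before the flag is re-checked, so a store
 * that lands between the check and the MWAIT still wakes the CPU:
 *
 *      __monitor(&flag, 0, 0);
 *      if (!flag)
 *              __mwait(0, 0);
 */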
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;

/* Boot loader type from the setup header: */
extern int bootloader_type;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 (%1)"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE               PAGE_OFFSET
#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           STACK_TOP

#define INIT_THREAD { \
        .sp0 = sizeof(init_stack) + (long)&init_stack, \
        .vm86_info = NULL, \
        .sysenter_cs = __KERNEL_CS, \
        .io_bitmap_ptr = NULL, \
        .fs = __KERNEL_PERCPU, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS { \
        .x86_tss = { \
                .sp0 = sizeof(init_stack) + (long)&init_stack, \
                .ss0 = __KERNEL_DS, \
                .ss1 = __KERNEL_CS, \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
        }, \
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS       (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
        unsigned long *__ptr = (unsigned long *)(info); \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({ \
        struct pt_regs *__regs__; \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1; \
})

#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

#else
/*
 * User space process size. 47bits minus one guard page.
 */
#define TASK_SIZE64     ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
                                        0xc0000000 : 0xFFFFe000)

#define TASK_SIZE               (test_thread_flag(TIF_IA32) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_IA32)) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE64)

#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           TASK_SIZE64

#define INIT_THREAD { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS { \
        .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)           -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                         unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

#endif