processor.h

#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>

#define HBP_NUM 4

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;

        asm volatile("mov $1f, %0; 1:":"=r" (pc));

        return pc;
}
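
/*
 * Illustrative usage (not part of the original header): the "1:" label
 * in the asm above sits immediately after the mov, so e.g.
 *
 *      void *pc = current_text_addr();
 *
 * returns an address just past the mov instruction itself.
 */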
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN      (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN      16
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8 x86;               /* CPU family */
        __u8 x86_vendor;        /* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
#ifdef CONFIG_X86_32
        char wp_works_ok;       /* It doesn't on 386's */

        /* Problems on some 486Dx4's and old 386's: */
        char hlt_works_ok;
        char hard_math;
        char rfu;
        char fdiv_bug;
        char f00f_bug;
        char coma_bug;
        char pad0;
#else
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
        int x86_tlbsize;
#endif
        __u8 x86_virt_bits;
        __u8 x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8 x86_coreid_bits;
        /* Max extended CPUID function supported: */
        __u32 extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int cpuid_level;
        __u32 x86_capability[NCAPINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        /* in KB - valid for CPUs which support this call: */
        int x86_cache_size;
        int x86_cache_alignment;        /* In bytes */
        int x86_power;
        unsigned long loops_per_jiffy;
        /* cpuid returned max cores value: */
        u16 x86_max_cores;
        u16 apicid;
        u16 initial_apicid;
        u16 x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16 booted_cores;
        /* Physical processor id: */
        u16 phys_proc_id;
        /* Core id: */
        u16 cpu_core_id;
        /* Compute unit id */
        u8 compute_unit_id;
        /* Index into per_cpu list: */
        u16 cpu_index;
        u32 microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9

#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern struct tss_struct doublefault_tss;
extern __u32 cpu_caps_cleared[NCAPINTS];
extern __u32 cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#else
#define cpu_info                boot_cpu_data
#define cpu_data(cpu)           boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
        return cpu_data(cpu).hlt_works_ok;
#else
        return 1;
#endif
}

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}
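
/*
 * Illustrative usage (not part of the original header): reading the
 * vendor string from CPUID leaf 0.  Both eax (leaf) and ecx (sub-leaf)
 * are inputs, so callers pre-load them:
 *
 *      unsigned int eax = 0, ebx, ecx = 0, edx;
 *      char vendor[13];
 *
 *      native_cpuid(&eax, &ebx, &ecx, &edx);
 *      memcpy(vendor + 0, &ebx, 4);    // "Genu" on Intel parts
 *      memcpy(vendor + 4, &edx, 4);    // "ineI"
 *      memcpy(vendor + 8, &ecx, 4);    // "ntel"
 *      vendor[12] = '\0';
 */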
static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short back_link, __blh;
        unsigned long sp0;
        unsigned short ss0, __ss0h;
        unsigned long sp1;

        /* ss1 caches MSR_IA32_SYSENTER_CS: */
        unsigned short ss1, __ss1h;

        unsigned long sp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long ip;
        unsigned long flags;
        unsigned long ax;
        unsigned long cx;
        unsigned long dx;
        unsigned long bx;
        unsigned long sp;
        unsigned long bp;
        unsigned long si;
        unsigned long di;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace;
        unsigned short io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32 reserved1;
        u64 sp0;
        u64 sp1;
        u64 sp2;
        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000
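
/*
 * Worked numbers (illustrative, not in the original): 65536 bits cover
 * the full 16-bit I/O port space, i.e. 65536/8 = 8192 bytes, which is
 * 2048 longs on 32-bit kernels or 1024 longs on 64-bit kernels.
 */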
struct tss_struct {
        /*
         * The hardware state:
         */
        struct x86_hw_tss x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];

        /*
         * .. and then another 0x100 bytes for the emergency kernel stack:
         */
        unsigned long stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
        unsigned long ist[7];
};

#define MXCSR_DEFAULT   0x1f80

struct i387_fsave_struct {
        u32 cwd;        /* FPU Control Word */
        u32 swd;        /* FPU Status Word */
        u32 twd;        /* FPU Tag Word */
        u32 fip;        /* FPU IP Offset */
        u32 fcs;        /* FPU IP Selector */
        u32 foo;        /* FPU Operand Pointer Offset */
        u32 fos;        /* FPU Operand Pointer Selector */

        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32 st_space[20];

        /* Software status information [not touched by FSAVE]: */
        u32 status;
};

struct i387_fxsave_struct {
        u16 cwd;        /* Control Word */
        u16 swd;        /* Status Word */
        u16 twd;        /* Tag Word */
        u16 fop;        /* Last Instruction Opcode */
        union {
                struct {
                        u64 rip; /* Instruction Pointer */
                        u64 rdp; /* Data Pointer */
                };
                struct {
                        u32 fip; /* FPU IP Offset */
                        u32 fcs; /* FPU IP Selector */
                        u32 foo; /* FPU Operand Offset */
                        u32 fos; /* FPU Operand Selector */
                };
        };
        u32 mxcsr;      /* MXCSR Register State */
        u32 mxcsr_mask; /* MXCSR Mask */

        /* 8*16 bytes for each FP-reg = 128 bytes: */
        u32 st_space[32];

        /* 16*16 bytes for each XMM-reg = 256 bytes: */
        u32 xmm_space[64];

        u32 padding[12];

        union {
                u32 padding1[12];
                u32 sw_reserved[12];
        };

} __attribute__((aligned(16)));

struct i387_soft_struct {
        u32 cwd;
        u32 swd;
        u32 twd;
        u32 fip;
        u32 fcs;
        u32 foo;
        u32 fos;
        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32 st_space[20];
        u8 ftop;
        u8 changed;
        u8 lookahead;
        u8 no_update;
        u8 rm;
        u8 alimit;
        struct math_emu_info *info;
        u32 entry_eip;
};

struct ymmh_struct {
        /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
        u32 ymmh_space[64];
};

struct xsave_hdr_struct {
        u64 xstate_bv;
        u64 reserved1[2];
        u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
        struct i387_fxsave_struct i387;
        struct xsave_hdr_struct xsave_hdr;
        struct ymmh_struct ymmh;
        /* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
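
/*
 * Layout note (illustrative, not in the original): this mirrors the
 * hardware XSAVE area -- a 512-byte legacy FXSAVE region, the 64-byte
 * xsave header at offset 512 (xstate_bv flags which states are valid),
 * and extended state (here the YMM high halves) from offset 576 on.
 */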
union thread_xstate {
        struct i387_fsave_struct fsave;
        struct i387_fxsave_struct fxsave;
        struct i387_soft_struct soft;
        struct xsave_struct xsave;
};

struct fpu {
        union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];

        /*
         * GCC hardcodes the stack canary as %gs:40. Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        struct {
                char gs_base[40];
                unsigned long stack_canary;
        };
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cached-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
        char __pad[20];         /* canary at %gs:20 */
        unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif /* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long sp0;
        unsigned long sp;
#ifdef CONFIG_X86_32
        unsigned long sysenter_cs;
#else
        unsigned long usersp;   /* Copy from PDA */
        unsigned short es;
        unsigned short ds;
        unsigned short fsindex;
        unsigned short gsindex;
#endif
#ifdef CONFIG_X86_32
        unsigned long ip;
#endif
#ifdef CONFIG_X86_64
        unsigned long fs;
#endif
        unsigned long gs;
        /* Save middle states of ptrace breakpoints */
        struct perf_event *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long debugreg6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long ptrace_dr7;
        /* Fault info: */
        unsigned long cr2;
        unsigned long trap_no;
        unsigned long error_code;
        /* floating point and extended processor state */
        struct fpu fpu;
#ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long screen_bitmap;
        unsigned long v86flags;
        unsigned long v86mask;
        unsigned long saved_sp0;
        unsigned int saved_fs;
        unsigned int saved_gs;
#endif
        /* IO permissions: */
        unsigned long *io_bitmap_ptr;
        unsigned long iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned io_bitmap_max;
};

static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0; /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("mov %%db0, %0" :"=r" (val));
                break;
        case 1:
                asm("mov %%db1, %0" :"=r" (val));
                break;
        case 2:
                asm("mov %%db2, %0" :"=r" (val));
                break;
        case 3:
                asm("mov %%db3, %0" :"=r" (val));
                break;
        case 6:
                asm("mov %%db6, %0" :"=r" (val));
                break;
        case 7:
                asm("mov %%db7, %0" :"=r" (val));
                break;
        default:
                BUG();
        }

        return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("mov %0, %%db0" ::"r" (value));
                break;
        case 1:
                asm("mov %0, %%db1" ::"r" (value));
                break;
        case 2:
                asm("mov %0, %%db2" ::"r" (value));
                break;
        case 3:
                asm("mov %0, %%db3" ::"r" (value));
                break;
        case 6:
                asm("mov %0, %%db6" ::"r" (value));
                break;
        case 7:
                asm("mov %0, %%db7" ::"r" (value));
                break;
        default:
                BUG();
        }
}
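
/*
 * Illustrative usage (not part of the original header): registers 4
 * and 5 are deliberately missing above -- they alias DR6/DR7.  A
 * caller might do:
 *
 *      unsigned long dr7 = native_get_debugreg(7);
 *      native_set_debugreg(7, dr7 | 1);        // local-enable breakpoint 0
 */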
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;

        asm volatile ("pushfl;"
                      "popl %0;"
                      "andl %1, %0;"
                      "orl %2, %0;"
                      "pushl %0;"
                      "popfl"
                      : "=&r" (reg)
                      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid                 native_cpuid
#define paravirt_enabled()      0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)                             \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register)                           \
        native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

#define set_iopl_mask           native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
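
/*
 * Illustrative usage (not part of the original header): enabling global
 * pages on the boot CPU; secondary CPUs then pick the bit up from
 * mmu_cr4_features when they come online:
 *
 *      set_in_cr4(X86_CR4_PGE);
 */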
typedef struct {
        unsigned long seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}
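
/*
 * Illustrative usage (not part of the original header): leaf 4 is
 * sub-leaf-indexed, so it goes through cpuid_count().  Enumerating the
 * Intel deterministic cache parameters might look like:
 *
 *      unsigned int eax, ebx, ecx, edx;
 *      int i;
 *
 *      for (i = 0; ; i++) {
 *              cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 *              if ((eax & 0x1f) == 0)  // cache type 0: no more caches
 *                      break;
 *      }
 */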
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}
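
/*
 * Illustrative usage (not part of the original header): testing a
 * feature bit directly, e.g. TSC support, which is bit 4 of EDX in
 * CPUID leaf 1:
 *
 *      if (cpuid_edx(1) & (1 << 4))
 *              ;       // this CPU has a timestamp counter
 */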
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
        rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
        int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
        if (boot_cpu_data.x86 < 5)
                /* There is no speculative execution.
                 * jmp is a barrier to prefetching. */
                asm volatile("jmp 1f\n1:\n" ::: "memory");
        else
#endif
                /* cpuid is a barrier to speculative execution.
                 * Prefetched instructions are automatically
                 * invalidated when modified. */
                asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                             : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax, %ecx, %edx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc8;"
                     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        trace_hardirqs_on();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}
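
/*
 * Illustrative usage (not part of the original header): the canonical
 * MONITOR/MWAIT idle pattern arms a monitor on a flag's cache line,
 * re-checks the flag, and sleeps until the line is written to (or an
 * interrupt arrives):
 *
 *      __monitor(&flag, 0, 0);         // arm address monitoring
 *      if (!flag)
 *              __mwait(0, 0);          // eax carries the C-state hint
 */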
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long boot_option_idle_override;
extern bool amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL, IDLE_FORCE_MWAIT};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;

/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE               PAGE_OFFSET
#define TASK_SIZE_MAX           TASK_SIZE
#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           STACK_TOP

#define INIT_THREAD  {                                                  \
        .sp0            = sizeof(init_stack) + (long)&init_stack,      \
        .vm86_info      = NULL,                                         \
        .sysenter_cs    = __KERNEL_CS,                                  \
        .io_bitmap_ptr  = NULL,                                         \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                                     \
        .x86_tss = {                                                    \
                .sp0            = sizeof(init_stack) + (long)&init_stack, \
                .ss0            = __KERNEL_DS,                          \
                .ss1            = __KERNEL_CS,                          \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,             \
        },                                                              \
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },     \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS       (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                  \
({                                                                      \
        unsigned long *__ptr = (unsigned long *)(info);                 \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                              \
({                                                                      \
        struct pt_regs *__regs__;                                       \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1;                                                   \
})
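
/*
 * Worked example (illustrative, not in the original; assumes the usual
 * 8 KB thread stack): KSTK_TOP() is task_stack_page() + 8192, and
 * task_pt_regs() points sizeof(struct pt_regs) bytes below
 * KSTK_TOP() - 8, so the whole pt_regs is addressable even when the
 * CPU pushed no SS/ESP.
 */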
#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

#else
/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX           ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
                                        0xc0000000 : 0xFFFFe000)

#define TASK_SIZE               (test_thread_flag(TIF_IA32) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_IA32)) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           TASK_SIZE_MAX

#define INIT_THREAD  { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
        .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                         unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
        u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
        WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

        rdmsrl(MSR_IA32_APERF, am->aperf);
        rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
                                    struct aperfmperf *new)
{
        u64 aperf = new->aperf - old->aperf;
        u64 mperf = new->mperf - old->mperf;
        unsigned long ratio = aperf;

        mperf >>= APERFMPERF_SHIFT;
        if (mperf)
                ratio = div64_u64(aperf, mperf);

        return ratio;
}
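
/*
 * Worked example (illustrative, not in the original): the result is a
 * fixed-point ratio, roughly (aperf/mperf) << 10.  A CPU that ran at
 * its reference frequency over the sample window (aperf == mperf)
 * yields 1024; one throttled to half the reference frequency yields
 * about 512.
 */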
/*
 * AMD errata checking
 */
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
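
/*
 * Illustrative sketch (not part of the original header): an erratum
 * table is a 0-terminated int array -- the OSVW id (or -1 for legacy
 * detection) followed by packed model ranges.  A hypothetical entry
 * covering family 0x10, model 0x2 stepping 0x1 through model 0xff
 * stepping 0xf, might look like:
 *
 *      const int example_erratum[] =
 *              AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
 *
 *      if (cpu_has_amd_erratum(example_erratum))
 *              ;       // apply the workaround
 */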
#else
#define cpu_has_amd_erratum(x)  (false)
#endif /* CONFIG_CPU_SUP_AMD */

#endif /* _ASM_X86_PROCESSOR_H */