/* processor_64.h -- x86-64 processor definitions */
  1. /*
  2. * Copyright (C) 1994 Linus Torvalds
  3. */
  4. #ifndef __ASM_X86_64_PROCESSOR_H
  5. #define __ASM_X86_64_PROCESSOR_H
  6. #include <asm/segment.h>
  7. #include <asm/page.h>
  8. #include <asm/types.h>
  9. #include <asm/sigcontext.h>
  10. #include <asm/cpufeature.h>
  11. #include <linux/threads.h>
  12. #include <asm/msr.h>
  13. #include <asm/current.h>
  14. #include <asm/system.h>
  15. #include <asm/mmsegment.h>
  16. #include <linux/personality.h>
  17. #include <asm/desc_defs.h>
  18. extern char ignore_irq13;
  19. extern void identify_cpu(struct cpuinfo_x86 *);
  20. /*
  21. * User space process size. 47bits minus one guard page.
  22. */
  23. #define TASK_SIZE64 (0x800000000000UL - 4096)
  24. /* This decides where the kernel will search for a free chunk of vm
  25. * space during mmap's.
  26. */
  27. #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
  28. #define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
  29. #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
  30. struct i387_fxsave_struct {
  31. u16 cwd;
  32. u16 swd;
  33. u16 twd;
  34. u16 fop;
  35. u64 rip;
  36. u64 rdp;
  37. u32 mxcsr;
  38. u32 mxcsr_mask;
  39. u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
  40. u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
  41. u32 padding[24];
  42. } __attribute__ ((aligned (16)));
  43. union i387_union {
  44. struct i387_fxsave_struct fxsave;
  45. };
  46. /* Save the original ist values for checking stack pointers during debugging */
  47. struct orig_ist {
  48. unsigned long ist[7];
  49. };
  50. DECLARE_PER_CPU(struct orig_ist, orig_ist);
  51. #define INIT_THREAD { \
  52. .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
  53. }
  54. #define INIT_TSS { \
  55. .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
  56. }
  57. #define INIT_MMAP \
  58. { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
  59. #define start_thread(regs,new_rip,new_rsp) do { \
  60. asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
  61. load_gs_index(0); \
  62. (regs)->ip = (new_rip); \
  63. (regs)->sp = (new_rsp); \
  64. write_pda(oldrsp, (new_rsp)); \
  65. (regs)->cs = __USER_CS; \
  66. (regs)->ss = __USER_DS; \
  67. (regs)->flags = 0x200; \
  68. set_fs(USER_DS); \
  69. } while(0)
  70. /*
  71. * Return saved PC of a blocked thread.
  72. * What is this good for? it will be always the scheduler or ret_from_fork.
  73. */
  74. #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
  75. #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
  76. #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
  77. #if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
  78. #define ASM_NOP1 P6_NOP1
  79. #define ASM_NOP2 P6_NOP2
  80. #define ASM_NOP3 P6_NOP3
  81. #define ASM_NOP4 P6_NOP4
  82. #define ASM_NOP5 P6_NOP5
  83. #define ASM_NOP6 P6_NOP6
  84. #define ASM_NOP7 P6_NOP7
  85. #define ASM_NOP8 P6_NOP8
  86. #else
  87. #define ASM_NOP1 K8_NOP1
  88. #define ASM_NOP2 K8_NOP2
  89. #define ASM_NOP3 K8_NOP3
  90. #define ASM_NOP4 K8_NOP4
  91. #define ASM_NOP5 K8_NOP5
  92. #define ASM_NOP6 K8_NOP6
  93. #define ASM_NOP7 K8_NOP7
  94. #define ASM_NOP8 K8_NOP8
  95. #endif
  96. /* Opteron nops */
  97. #define K8_NOP1 ".byte 0x90\n"
  98. #define K8_NOP2 ".byte 0x66,0x90\n"
  99. #define K8_NOP3 ".byte 0x66,0x66,0x90\n"
  100. #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
  101. #define K8_NOP5 K8_NOP3 K8_NOP2
  102. #define K8_NOP6 K8_NOP3 K8_NOP3
  103. #define K8_NOP7 K8_NOP4 K8_NOP3
  104. #define K8_NOP8 K8_NOP4 K8_NOP4
  105. /* P6 nops */
  106. /* uses eax dependencies (Intel-recommended choice) */
  107. #define P6_NOP1 ".byte 0x90\n"
  108. #define P6_NOP2 ".byte 0x66,0x90\n"
  109. #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
  110. #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
  111. #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
  112. #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
  113. #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
  114. #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
  115. #define ASM_NOP_MAX 8
  116. static inline void prefetchw(void *x)
  117. {
  118. alternative_input("prefetcht0 (%1)",
  119. "prefetchw (%1)",
  120. X86_FEATURE_3DNOW,
  121. "r" (x));
  122. }
  123. #define stack_current() \
  124. ({ \
  125. struct thread_info *ti; \
  126. asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
  127. ti->task; \
  128. })
  129. #endif /* __ASM_X86_64_PROCESSOR_H */