/******************************************************************************
 * arch-x86_32.h
 *
 * Guest OS interface to x86 32-bit Xen.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_32_H__

/*
 * Guest handles wrap guest-virtual pointers that are passed to hypercalls.
 * When built inside the hypervisor (__XEN__) the handle is a one-member
 * struct around the pointer; in guest builds it is a plain pointer typedef.
 */
#ifdef __XEN__
#define __DEFINE_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __guest_handle_ ## name
#else
#define __DEFINE_GUEST_HANDLE(name, type) \
    typedef type * __guest_handle_ ## name
#endif

/* Define a handle for `struct name` under the bare tag `name`. */
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
    __DEFINE_GUEST_HANDLE(name, struct name)
/* Define a handle for a plain (non-struct) type `name`. */
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
/* Name of the handle type produced by the definitions above. */
#define GUEST_HANDLE(name) __guest_handle_ ## name

#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
#endif
/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096) /* pages -> bytes */
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)    /* 8 bytes per descriptor */

/*
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 *
 * Ring-1 selectors serve the guest kernel (see FLAT_KERNEL_* aliases below);
 * ring-3 selectors serve user space. DS and SS share a descriptor in each ring.
 */
#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
#define FLAT_RING3_SS 0xe033    /* GDT index 262 */

#define FLAT_KERNEL_CS FLAT_RING1_CS
#define FLAT_KERNEL_DS FLAT_RING1_DS
#define FLAT_KERNEL_SS FLAT_RING1_SS
#define FLAT_USER_CS   FLAT_RING3_CS
#define FLAT_USER_DS   FLAT_RING3_DS
#define FLAT_USER_SS   FLAT_RING3_SS

/* And the trap vector is... */
#define TRAP_INSTR "int $0x82"
/*
 * Virtual addresses beyond this are not modifiable by guest OSes. The
 * machine->physical mapping table starts at this address, read-only.
 */
#ifdef CONFIG_X86_PAE
#define __HYPERVISOR_VIRT_START 0xF5800000
#else
#define __HYPERVISOR_VIRT_START 0xFC000000
#endif

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif

/* Read-only machine-frame -> pseudo-physical-frame translation array. */
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32

#ifndef __ASSEMBLY__
/*
 * Send an array of these to HYPERVISOR_set_trap_table()
 *
 * Accessors for trap_info.flags: the DPL occupies bits 0-1 (mask 3) and the
 * interrupt-enable flag is bit 2. The SET macros OR bits in; they do not
 * clear previously-set bits.
 */
#define TI_GET_DPL(_ti)       ((_ti)->flags & 3)
#define TI_GET_IF(_ti)        ((_ti)->flags & 4)
#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti, _if)   ((_ti)->flags |= ((!!(_if))<<2))
/*
 * One entry of the virtual IDT; an array of these is passed to
 * HYPERVISOR_set_trap_table(). Use the TI_* macros to access `flags`.
 */
struct trap_info {
    uint8_t  vector;        /* exception vector */
    uint8_t  flags;         /* 0-3: privilege level; 4: clear event enable? */
    uint16_t cs;            /* code selector */
    unsigned long address;  /* code offset */
};
DEFINE_GUEST_HANDLE_STRUCT(trap_info);
/*
 * User-level register state. Field order and the explicit _pad members are
 * part of the ABI layout; do not reorder.
 */
struct cpu_user_regs {
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;
    uint32_t eax;
    uint16_t error_code;    /* private */
    uint16_t entry_vector;  /* private */
    uint32_t eip;
    uint16_t cs;
    uint8_t  saved_upcall_mask;
    uint8_t  _pad0;         /* explicit padding; keeps eflags 32-bit aligned */
    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
    uint32_t esp;
    uint16_t ss, _pad1;
    uint16_t es, _pad2;
    uint16_t ds, _pad3;
    uint16_t fs, _pad4;
    uint16_t gs, _pad5;
};
DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);

typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
struct vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers */
#define VGCF_I387_VALID (1<<0)
#define VGCF_HVM_GUEST  (1<<1)
#define VGCF_IN_KERNEL  (1<<2)
    unsigned long flags;                    /* VGCF_* flags */
    struct cpu_user_regs user_regs;         /* User-level CPU registers */
    struct trap_info trap_ctxt[256];        /* Virtual IDT */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1) */
    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers) */
    unsigned long debugreg[8];              /* DB0-DB7 (debug registers) */
    unsigned long event_callback_cs;        /* CS:EIP of event callback */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback */
    unsigned long failsafe_callback_eip;
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
/* x86-32-specific portion of the shared info page. */
struct arch_shared_info {
    unsigned long max_pfn;  /* max pfn that appears in table */
    /* Frame containing list of mfns containing list of mfns containing p2m. */
    unsigned long pfn_to_mfn_frame_list_list;
    unsigned long nmi_reason;
};
/* x86-32-specific portion of the per-vcpu info structure. */
struct arch_vcpu_info {
    unsigned long cr2;      /* faulting address, as delivered by #PF */
    unsigned long pad[5];   /* sizeof(struct vcpu_info) == 64 */
};
#endif /* !__ASSEMBLY__ */
/*
 * Prefix forces emulation of some non-trapping instructions.
 * Currently only CPUID.
 *
 * Byte sequence: 0x0f,0x0b (ud2) followed by 0x78,0x65,0x6e (ASCII "xen").
 */
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
#endif

#endif /* __XEN_PUBLIC_ARCH_X86_32_H__ */