#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI (1 << 3)
#define CLBR_RDI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)
#define CLBR_ANY ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the regs the platform has. For i386,
 * that's just EAX, ECX and EDX. */
#define CLBR_ANY ((1 << 3) - 1)
#endif /* X86_64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX. The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
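
/*
 * Illustrative sketch (not part of this header): a hypothetical backend
 * with no special inline sequences could implement ->patch by simply
 * deferring to the default patcher declared further below. "myhv" here
 * is an invented name for illustration only:
 *
 *      static unsigned myhv_patch(u8 type, u16 clobbers, void *ibuf,
 *                                 unsigned long addr, unsigned len)
 *      {
 *              return paravirt_patch_default(type, clobbers, ibuf,
 *                                            addr, len);
 *      }
 */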

struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace. This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes. (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret. Only used in 64-bit kernels to return to 64-bit
         * processes. Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret. Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret. Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state. save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI. Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        void (*apic_write_atomic)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
        void (*release_pte)(u32 pfn);
        void (*release_pmd)(u32 pfn);
        void (*release_pud)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        pteval_t (*pte_val)(pte_t);
        pteval_t (*pte_flags)(pte_t);
        pte_t (*make_pte)(pteval_t pte);

        pgdval_t (*pgd_val)(pgd_t);
        pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);
#endif /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        pmdval_t (*pmd_val)(pmd_t);
        pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn. We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           unsigned long phys, pgprot_t flags);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using its offset into the template,
 * which we use to indicate what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;

#define PARAVIRT_PATCH(x) \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op) \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber) \
        [paravirt_clobber] "i" (clobber)
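
/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) evaluates to the
 * word offset of the irq_disable slot within paravirt_patch_template.
 * Every patchable site for that op carries the same type number, so the
 * patcher can map the number straight back to the ops slot it replaces.
 */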

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber) \
        "771:\n\t" insn_string "\n" "772:\n" \
        ".pushsection .parainstructions,\"a\"\n" \
        _ASM_ALIGN "\n" \
        _ASM_PTR " 771b\n" \
        " .byte " type "\n" \
        " .byte 772b-771b\n" \
        " .short " clobber "\n" \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code) \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
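
/*
 * Typical use: define the raw native instruction sequence for an op,
 * then hand it to paravirt_patch_insns() from a patch callback. For
 * instance,
 *
 *      DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * emits start_pv_irq_ops_irq_disable/end_pv_irq_ops_irq_disable
 * markers around a single "cli" instruction.
 */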

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL "call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is. In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite. (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack. All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters passed in
 * %rdi, %rsi, %rdx, and %rcx. Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386. However, x86_64 also has to clobber all caller-saved
 * registers, which unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm. Because of this, we must do the complete arg setup and
 * return value handling from within these macros. This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that. For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type. The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers. The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header. This
 * means that all uses must be wrapped in inline functions. This also
 * makes sure the incoming and outgoing types are always correct.
 */
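
/*
 * Illustrative sketch of the intended usage pattern (the real wrappers
 * appear later in this header): a one-argument op with a return value
 * is wrapped as
 *
 *      static inline unsigned long paravirt_get_debugreg(int reg)
 *      {
 *              return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
 *      }
 *
 * On i386 this pins "reg" into %eax, issues the annotated indirect
 * call, and reads the result back from %eax; on x86_64 the argument
 * goes in %rdi and the result comes back in %rax.
 */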

#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
                            "=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
                            "=S" (__esi), "=d" (__edx), \
                            "=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op) ((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...) \
({ \
        rettype __ret; \
        PVOP_CALL_ARGS; \
        PVOP_TEST_NULL(op); \
        /* This is 32-bit specific, but is okay in 64-bit */ \
        /* since this condition will never hold */ \
        if (sizeof(rettype) > sizeof(unsigned long)) { \
                asm volatile(pre \
                             paravirt_alt(PARAVIRT_CALL) \
                             post \
                             : PVOP_CALL_CLOBBERS \
                             : paravirt_type(op), \
                               paravirt_clobber(CLBR_ANY), \
                               ##__VA_ARGS__ \
                             : "memory", "cc" EXTRA_CLOBBERS); \
                __ret = (rettype)((((u64)__edx) << 32) | __eax); \
        } else { \
                asm volatile(pre \
                             paravirt_alt(PARAVIRT_CALL) \
                             post \
                             : PVOP_CALL_CLOBBERS \
                             : paravirt_type(op), \
                               paravirt_clobber(CLBR_ANY), \
                               ##__VA_ARGS__ \
                             : "memory", "cc" EXTRA_CLOBBERS); \
                __ret = (rettype)__eax; \
        } \
        __ret; \
})

#define __PVOP_VCALL(op, pre, post, ...) \
({ \
        PVOP_VCALL_ARGS; \
        PVOP_TEST_NULL(op); \
        asm volatile(pre \
                     paravirt_alt(PARAVIRT_CALL) \
                     post \
                     : PVOP_VCALL_CLOBBERS \
                     : paravirt_type(op), \
                       paravirt_clobber(CLBR_ANY), \
                       ##__VA_ARGS__ \
                     : "memory", "cc" VEXTRA_CLOBBERS); \
})

#define PVOP_CALL0(rettype, op) \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
                    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
                     "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
                     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
        __PVOP_CALL(rettype, op, \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;", \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
        __PVOP_VCALL(op, \
                     "push %[_arg4];", "lea 4(%%esp),%%esp;", \
                     "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
                    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
                     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
                     "3" ((unsigned long)(arg4)))
#endif

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP pv_init_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \
        int _err; \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l; \
        val2 = _l >> 32; \
} while (0)

#define wrmsr(msr, val1, val2) \
do { \
        paravirt_write_msr(msr, val1, val2); \
} while (0)

#define rdmsrl(msr, val) \
do { \
        int _err; \
        val = paravirt_read_msr(msr, &_err); \
} while (0)

#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b) \
({ \
        int _err; \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l; \
        (*b) = _l >> 32; \
        _err; \
})
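
/*
 * Usage sketch (illustrative; MSR_IA32_APICBASE is just a stand-in for
 * any MSR index):
 *
 *      u32 lo, hi;
 *      rdmsr(MSR_IA32_APICBASE, lo, hi);       // split 64-bit result
 *
 *      if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi))
 *              ;                               // non-zero means -EFAULT
 */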

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low) \
do { \
        u64 _l = paravirt_read_tsc(); \
        low = (int)_l; \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high) \
do { \
        u64 _l = paravirt_read_pmc(counter); \
        low = (u32)_l; \
        high = _l >> 32; \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux) \
do { \
        unsigned int __aux; \
        unsigned long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val; \
        (high) = (u32)(__val >> 32); \
        (aux) = __aux; \
} while (0)

#define rdtscpll(val, aux) \
do { \
        unsigned int __aux; \
        val = paravirt_rdtscp(&__aux); \
        (aux) = __aux; \
} while (0)

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr) ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
                                            unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte);

        return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
                                 val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
                                 pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
                                 pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
                                 val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
                                 val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
                                 pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
                                 pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
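
/*
 * Usage sketch (illustrative): a caller doing many pagetable updates
 * can batch the resulting hypercalls by bracketing them in lazy MMU
 * mode; the backend may queue the set_pte_at() operations and flush
 * them all when the mode is left:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 */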

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                unsigned long phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
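
/*
 * Sketch of how the patcher consumes these records (simplified for
 * illustration; the real loop lives in apply_paravirt() in
 * alternative.c):
 *
 *      struct paravirt_patch_site *p;
 *      for (p = __parainstructions; p < __parainstructions_end; p++) {
 *              unsigned used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *                                                p->instr,
 *                                                (unsigned long)p->instr,
 *                                                p->len);
 *              // pad the remaining (p->len - used) bytes with nops
 *      }
 */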

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* Saving all registers would be too much, so we save only a few;
 * we clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG "D"
#endif

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn) \
771:; \
        ops; \
772:; \
        .pushsection .parainstructions,"a"; \
         .align algn; \
         word 771b; \
         .byte ptype; \
         .byte 772b-771b; \
         .short clobbers; \
        .popsection

#ifdef CONFIG_X86_64
#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
#else
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr) *%cs:addr
#endif

#define INTERRUPT_RETURN \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers) \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS; \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers) \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
                  PV_SAVE_REGS; \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
                  PV_RESTORE_REGS;)

#define USERGS_SYSRET32 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
                  CLBR_NONE, \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX \
        push %ecx; push %edx; \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
                  CLBR_NONE, \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else  /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
                  swapgs)

#define SWAPGS \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
                  PV_SAVE_REGS; \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
                  PV_RESTORE_REGS \
                 )

#define GET_CR2_INTO_RCX \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx; \
        xorq %rax, %rax;

#define USERGS_SYSRET64 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
                  CLBR_NONE, \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
                  CLBR_NONE, \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */