paravirt.h
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)
#define CLBR_ANY  ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the registers the platform has. For i386,
 * that's just the three above. */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};
struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX. The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
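
/*
 * A minimal sketch of what a backend's patch hook might look like,
 * assuming a hypothetical backend that can inline the native "sti"
 * for irq_enable and punts everything else to paravirt_patch_default().
 * Illustrative only; not built as part of this header.
 */
#if 0
static unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
                              unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
                /* Inline the native instruction bytes at the call site
                 * (emitted by DEF_NATIVE(pv_irq_ops, irq_enable, "sti")). */
                return paravirt_patch_insns(insnbuf, len,
                                            start_pv_irq_ops_irq_enable,
                                            end_pv_irq_ops_irq_enable);
        default:
                /* Turn the indirect call into a direct call, or nops. */
                return paravirt_patch_default(type, clobbers, insnbuf,
                                              addr, len);
        }
}
#endif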
struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);
};
struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /* These two are jumped to, not actually called. */
        void (*irq_enable_syscall_ret)(void);
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};
struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state. save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI. Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        void (*apic_write_atomic)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
        void (*release_pte)(u32 pfn);
        void (*release_pmd)(u32 pfn);
        void (*release_pud)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pteval_t (*pte_val)(pte_t);
        pte_t (*make_pte)(pteval_t pte);

        pgdval_t (*pgd_val)(pgd_t);
        pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);
#endif /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        pmdval_t (*pmd_val)(pmd_t);
        pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;
};
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
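
/*
 * A backend enables itself by overwriting fields of these structures
 * at early boot, before any pv_ops call site runs. A minimal
 * hypothetical backend might look like the sketch below (the
 * example_* names are illustrative, not a real API). Not built as
 * part of this header.
 */
#if 0
static unsigned long example_save_fl(void)
{
        /* A real backend would read the virtual IF flag from shared
         * memory or via a hypercall; pretend interrupts are on. */
        return X86_EFLAGS_IF;
}

static void example_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on example\n");
}

static void __init example_init(void)
{
        pv_info.name = "example";
        pv_info.paravirt_enabled = 1;

        /* Replace only the hooks this backend cares about; the rest
         * keep their native implementations. */
        pv_irq_ops.save_fl = example_save_fl;
        pv_init_ops.banner = example_banner;
}
#endif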
#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)                       \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        " .byte " type "\n"                             \
        " .byte 772b-771b\n"                            \
        " .short " clobber "\n"                         \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL "call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is. In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite. (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack. All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. For this reason, x86_64 does not need any special
 * handling for dealing with 4 arguments, unlike i386. However, x86_64
 * also has to clobber all caller-saved registers, which is
 * unfortunately quite a few of them (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm. Because of this, we must do the complete arg setup and
 * return value handling from within these macros. This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that. For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type. The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). On x86_64 machines, the return is simply in %rax regardless
 * of the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers. The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header. This
 * means that all uses must be wrapped in inline functions. This also
 * makes sure the incoming and outgoing types are always correct.
 */
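
/*
 * Concretely, a wrapper such as read_cr2() further below expands (on
 * i386, before patching) roughly to the following; this is an
 * illustrative sketch, not the exact compiler output:
 *
 *      771: call *pv_mmu_ops+PV_MMU_read_cr2   # indirect call, %eax = result
 *      772:
 *      .pushsection .parainstructions, "a"
 *        .long 771b                            # address of the call site
 *        .byte PARAVIRT_PATCH(pv_mmu_ops.read_cr2)
 *        .byte 772b-771b                       # length of the site
 *        .short CLBR_ANY                       # what it may clobber
 *      .popsection
 *
 * apply_paravirt() can later rewrite the bytes at 771 into a direct
 * call, or inline e.g. "mov %cr2, %eax" for the native case.
 */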
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS         unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS     "=a" (__eax), "=d" (__edx),     \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                   \
                                "=S" (__esi), "=d" (__edx),     \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS          , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS         , "rax", "r8", "r9", "r10", "r11"
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })

#define __PVOP_VCALL(op, pre, post, ...)                                \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : PVOP_VCALL_CLOBBERS                      \
                             : paravirt_type(op),                       \
                               paravirt_clobber(CLBR_ANY),              \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" VEXTRA_CLOBBERS);         \
        })

#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
                    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
                    "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))

/* The 4-argument case is the only place 32-bit and 64-bit differ;
 * with register args, x86_64 is much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
                    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
                    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
                    "3" ((unsigned long)(arg4)))
#endif
static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP      pv_init_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}
static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}
static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}
static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}
static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}
static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}
static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr(msr, &_err);            \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
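
/*
 * Typical usage of the MSR accessors above, as a brief illustration
 * (MSR_EFER comes from <asm/msr-index.h>; how errors are handled is
 * the caller's choice). Not built as part of this header.
 */
#if 0
static void example_msr_usage(void)
{
        u64 efer;
        u32 lo, hi;

        rdmsrl(MSR_EFER, efer);         /* 64-bit read, error ignored */
        rdmsr(MSR_EFER, lo, hi);        /* same value as a low/high pair */

        if (wrmsr_safe(MSR_EFER, lo, hi) != 0)
                ;                       /* write faulted, returned -EFAULT */
}
#endif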
static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                                     \
do {                                                    \
        u64 _l = paravirt_read_tsc();                   \
        low = (int)_l;                                  \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = paravirt_read_pmc(counter);            \
        low = (u32)_l;                                  \
        high = _l >> 32;                                \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)
static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}

static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
                                            unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
                                 val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
                                 pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
                                 pgd.pgd);

        return ret;
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words: more than the PVOP macros handle,
                   so call through the pointer directly. */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
                                 val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
                                 val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
                                 pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
                                 pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words: too many for the PVOP macros, so call directly. */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}

#else  /* !CONFIG_X86_PAE */

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
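
/*
 * A brief sketch of how lazy MMU mode is meant to be used by generic
 * code: pagetable updates issued between enter and leave may be
 * queued by the backend and flushed to the hypervisor as one batch on
 * leave. Illustrative only; assumes a caller that already holds the
 * appropriate pagetable locks.
 */
#if 0
static void example_batch_update(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t new, int npages)
{
        int i;

        arch_enter_lazy_mmu_mode();
        for (i = 0; i < npages; i++)
                set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, new);
        arch_leave_lazy_mmu_mode();     /* hypervisor sees one batch */
}
#endif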
void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
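
/*
 * For orientation, apply_paravirt() in alternative.c walks this table
 * roughly as sketched below: for every recorded call site it asks the
 * backend's patch hook for replacement code, and the remaining bytes
 * of the site are nop-padded. A simplified sketch, not the exact
 * implementation.
 */
#if 0
void example_apply_paravirt(struct paravirt_patch_site *start,
                            struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[16];

        for (p = start; p < end; p++) {
                unsigned used;

                used = pv_init_ops.patch(p->instrtype, p->clobbers,
                                         insnbuf, (unsigned long)p->instr,
                                         p->len);
                /* The generated code is copied over the call site and
                 * the remaining p->len - used bytes become nops. */
        }
}
#endif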
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS    "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG    "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them; that would cost too
 * much. Instead we clobber all caller-saved registers except the
 * argument register. */
#define PV_SAVE_REGS    "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS       EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS      EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG    "D"
#endif
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
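
/*
 * The canonical save/disable ... restore pattern built from the
 * helpers above; generic code normally reaches these through the
 * local_irq_save()/local_irq_restore() wrappers. Illustrative only.
 */
#if 0
static void example_critical_section(void)
{
        unsigned long flags;

        flags = __raw_local_irq_save(); /* save IF, then disable */
        /* ... code that must not be interrupted ... */
        raw_local_irq_restore(flags);   /* restore previous IF state */
}
#endif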
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS    pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
#define PARA_PATCH(struct, off)  ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#else
#define PV_SAVE_REGS    pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)  ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;              \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;               \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS_SYSCALL_RET                                   \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
                  CLBR_NONE,                                            \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                        \
        push %ecx; push %edx;                   \
        call *pv_cpu_ops+PV_CPU_read_cr0;       \
        pop %edx; pop %ecx
#else
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  PV_SAVE_REGS;                                         \
                  call *pv_cpu_ops+PV_CPU_swapgs;                       \
                  PV_RESTORE_REGS                                       \
                 )

#define GET_CR2_INTO_RCX                        \
        call *pv_mmu_ops+PV_MMU_read_cr2;       \
        movq %rax, %rcx;                        \
        xorq %rax, %rax;
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */