/* paravirt.h */
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7
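
/*
 * For illustration only: the masks combine bitwise, so a call site that may
 * clobber both EAX and ECX would pass (CLBR_EAX | CLBR_ECX) == 0x3, and
 * CLBR_ANY is simply all three scratch registers ORed together (0x7).
 */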
#ifndef __ASSEMBLY__
#include <linux/types.h>

struct thread_struct;
struct Xgt_desc_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;

struct paravirt_ops
{
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
	/*
	 * Patch may replace one of the defined code sequences with arbitrary
	 * code, subject to the same register constraints.  This generally
	 * means the code is not free to clobber any registers other than EAX.
	 * The patch function should return the number of bytes of code
	 * generated, as we nop pad the rest in generic code.  (An
	 * illustrative sketch of such a hook follows this structure.)
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);

	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*init_IRQ)(void);

	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	void (*banner)(void);

	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);
	void (*time_init)(void);

	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	void (*safe_halt)(void);
	void (*halt)(void);
	void (*wbinvd)(void);

	/* err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, u64 val);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(void);
	u64 (*get_scheduled_cycles)(void);
	unsigned long (*get_cpu_khz)(void);

	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct Xgt_desc_struct *);
	void (*load_idt)(const struct Xgt_desc_struct *);
	void (*store_gdt)(struct Xgt_desc_struct *);
	void (*store_idt)(struct Xgt_desc_struct *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*write_ldt_entry)(struct desc_struct *,
				int entrynum, u32 low, u32 high);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, u32 low, u32 high);
	void (*write_idt_entry)(struct desc_struct *,
				int entrynum, u32 low, u32 high);
	void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*io_delay)(void);

	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_X86_LOCAL_APIC
	void (*apic_write)(unsigned long reg, unsigned long v);
	void (*apic_write_atomic)(unsigned long reg, unsigned long v);
	unsigned long (*apic_read)(unsigned long reg);
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);
#endif

	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);

	void (*map_pt_hook)(int type, pte_t *va, u32 pfn);

	void (*alloc_pt)(u32 pfn);
	void (*alloc_pd)(u32 pfn);
	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
	void (*release_pt)(u32 pfn);
	void (*release_pd)(u32 pfn);

	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
	pte_t (*ptep_get_and_clear)(pte_t *ptep);

#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
	void (*set_pud)(pud_t *pudp, pud_t pudval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

	unsigned long long (*pte_val)(pte_t);
	unsigned long long (*pmd_val)(pmd_t);
	unsigned long long (*pgd_val)(pgd_t);

	pte_t (*make_pte)(unsigned long long pte);
	pmd_t (*make_pmd)(unsigned long long pmd);
	pgd_t (*make_pgd)(unsigned long long pgd);
#else
	unsigned long (*pte_val)(pte_t);
	unsigned long (*pgd_val)(pgd_t);

	pte_t (*make_pte)(unsigned long pte);
	pgd_t (*make_pgd)(unsigned long pgd);
#endif

	void (*set_lazy_mode)(int mode);

	/* These two are jmp to, not actually called. */
	void (*irq_enable_sysexit)(void);
	void (*iret)(void);

	void (*startup_ipi_hook)(int phys_apicid, unsigned long start_eip,
				 unsigned long start_esp);
};
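
/*
 * Illustrative sketch only (not part of this interface): a backend's
 * ->patch hook might overwrite a call site with a shorter native sequence
 * and report how many bytes it emitted; generic code then nop-pads the
 * rest of the site.  "example_patch" is a hypothetical name.
 *
 *	static unsigned example_patch(u8 type, u16 clobber,
 *				      void *firstinsn, unsigned len)
 *	{
 *		static const unsigned char clts_insn[] = { 0x0f, 0x06 };
 *
 *		if (type == PARAVIRT_PATCH(clts) && len >= sizeof(clts_insn)) {
 *			memcpy(firstinsn, clts_insn, sizeof(clts_insn));
 *			return sizeof(clts_insn);
 *		}
 *		return len;		(leave the site untouched)
 *	}
 */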

/* Mark a paravirt probe function. */
#define paravirt_probe(fn)						\
 static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
	__attribute__((__section__(".paravirtprobe"))) = fn
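
/*
 * Usage sketch (hypothetical backend; "example_probe" is a made-up name):
 *
 *	paravirt_probe(example_probe);
 *
 * places a pointer to example_probe() in the .paravirtprobe section so
 * boot code can locate and invoke it when detecting a hypervisor.
 */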

extern struct paravirt_ops paravirt_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_ops, x) / sizeof(void *))

#define paravirt_type(type)					\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(type))
#define paravirt_clobber(clobber)				\
	[paravirt_clobber] "i" (clobber)

#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);"

#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	" .long 771b\n"					\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

#define paravirt_alt(insn_string)				\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
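
/*
 * For illustration: paravirt_type(irq_disable) expands to the word index
 * of ->irq_disable within struct paravirt_ops, so PARAVIRT_CALL assembles
 * to an indirect call through that slot, roughly
 *
 *	call *(paravirt_ops + PARAVIRT_PATCH(irq_disable)*4)
 *
 * while _paravirt_alt() records the site's address, type, length and
 * clobber mask in .parainstructions so the site can be patched later.
 */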

#define PVOP_CALL0(__rettype, __op)					\
	({								\
		__rettype __ret;					\
		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
			unsigned long long __tmp;			\
			unsigned long __ecx;				\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=A" (__tmp), "=c" (__ecx)	\
				     : paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		} else {						\
			unsigned long __tmp, __edx, __ecx;		\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=a" (__tmp), "=d" (__edx),	\
				       "=c" (__ecx)			\
				     : paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		}							\
		__ret;							\
	})

#define PVOP_VCALL0(__op)						\
	({								\
		unsigned long __eax, __edx, __ecx;			\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
			     : paravirt_type(__op),			\
			       paravirt_clobber(CLBR_ANY)		\
			     : "memory", "cc");				\
	})

#define PVOP_CALL1(__rettype, __op, arg1)				\
	({								\
		__rettype __ret;					\
		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
			unsigned long long __tmp;			\
			unsigned long __ecx;				\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=A" (__tmp), "=c" (__ecx)	\
				     : "a" ((u32)(arg1)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		} else {						\
			unsigned long __tmp, __edx, __ecx;		\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=a" (__tmp), "=d" (__edx),	\
				       "=c" (__ecx)			\
				     : "0" ((u32)(arg1)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		}							\
		__ret;							\
	})

#define PVOP_VCALL1(__op, arg1)						\
	({								\
		unsigned long __eax, __edx, __ecx;			\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
			     : "0" ((u32)(arg1)),			\
			       paravirt_type(__op),			\
			       paravirt_clobber(CLBR_ANY)		\
			     : "memory", "cc");				\
	})

#define PVOP_CALL2(__rettype, __op, arg1, arg2)				\
	({								\
		__rettype __ret;					\
		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
			unsigned long long __tmp;			\
			unsigned long __ecx;				\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=A" (__tmp), "=c" (__ecx)	\
				     : "a" ((u32)(arg1)),		\
				       "d" ((u32)(arg2)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		} else {						\
			unsigned long __tmp, __edx, __ecx;		\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=a" (__tmp), "=d" (__edx),	\
				       "=c" (__ecx)			\
				     : "0" ((u32)(arg1)),		\
				       "1" ((u32)(arg2)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		}							\
		__ret;							\
	})

#define PVOP_VCALL2(__op, arg1, arg2)					\
	({								\
		unsigned long __eax, __edx, __ecx;			\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
			     : "0" ((u32)(arg1)),			\
			       "1" ((u32)(arg2)),			\
			       paravirt_type(__op),			\
			       paravirt_clobber(CLBR_ANY)		\
			     : "memory", "cc");				\
	})

#define PVOP_CALL3(__rettype, __op, arg1, arg2, arg3)			\
	({								\
		__rettype __ret;					\
		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
			unsigned long long __tmp;			\
			unsigned long __ecx;				\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=A" (__tmp), "=c" (__ecx)	\
				     : "a" ((u32)(arg1)),		\
				       "d" ((u32)(arg2)),		\
				       "1" ((u32)(arg3)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		} else {						\
			unsigned long __tmp, __edx, __ecx;		\
			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
				     : "=a" (__tmp), "=d" (__edx),	\
				       "=c" (__ecx)			\
				     : "0" ((u32)(arg1)),		\
				       "1" ((u32)(arg2)),		\
				       "2" ((u32)(arg3)),		\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		}							\
		__ret;							\
	})

#define PVOP_VCALL3(__op, arg1, arg2, arg3)				\
	({								\
		unsigned long __eax, __edx, __ecx;			\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
			     : "0" ((u32)(arg1)),			\
			       "1" ((u32)(arg2)),			\
			       "2" ((u32)(arg3)),			\
			       paravirt_type(__op),			\
			       paravirt_clobber(CLBR_ANY)		\
			     : "memory", "cc");				\
	})

#define PVOP_CALL4(__rettype, __op, arg1, arg2, arg3, arg4)		\
	({								\
		__rettype __ret;					\
		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
			unsigned long long __tmp;			\
			unsigned long __ecx;				\
			asm volatile("push %[_arg4]; "			\
				     paravirt_alt(PARAVIRT_CALL)	\
				     "lea 4(%%esp),%%esp"		\
				     : "=A" (__tmp), "=c" (__ecx)	\
				     : "a" ((u32)(arg1)),		\
				       "d" ((u32)(arg2)),		\
				       "1" ((u32)(arg3)),		\
				       [_arg4] "mr" ((u32)(arg4)),	\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		} else {						\
			unsigned long __tmp, __edx, __ecx;		\
			asm volatile("push %[_arg4]; "			\
				     paravirt_alt(PARAVIRT_CALL)	\
				     "lea 4(%%esp),%%esp"		\
				     : "=a" (__tmp), "=d" (__edx), "=c" (__ecx) \
				     : "0" ((u32)(arg1)),		\
				       "1" ((u32)(arg2)),		\
				       "2" ((u32)(arg3)),		\
				       [_arg4] "mr" ((u32)(arg4)),	\
				       paravirt_type(__op),		\
				       paravirt_clobber(CLBR_ANY)	\
				     : "memory", "cc");			\
			__ret = (__rettype)__tmp;			\
		}							\
		__ret;							\
	})

#define PVOP_VCALL4(__op, arg1, arg2, arg3, arg4)			\
	({								\
		unsigned long __eax, __edx, __ecx;			\
		asm volatile("push %[_arg4]; "				\
			     paravirt_alt(PARAVIRT_CALL)		\
			     "lea 4(%%esp),%%esp"			\
			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
			     : "0" ((u32)(arg1)),			\
			       "1" ((u32)(arg2)),			\
			       "2" ((u32)(arg3)),			\
			       [_arg4] "mr" ((u32)(arg4)),		\
			       paravirt_type(__op),			\
			       paravirt_clobber(CLBR_ANY)		\
			     : "memory", "cc");				\
	})

static inline int paravirt_enabled(void)
{
	return paravirt_ops.paravirt_enabled;
}

static inline void load_esp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(load_esp0, tss, thread);
}

#define ARCH_SETUP paravirt_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return paravirt_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
}
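
/*
 * Example (illustrative only): querying the vendor string through the
 * paravirtualized CPUID.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	(leaf 0: ebx/edx/ecx now hold the 12-byte vendor identifier)
 */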

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(set_debugreg, reg, val);
}
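
/*
 * Example (illustrative only): these mirror the native accessors, e.g.
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);	(read DR6 via paravirt_ops.get_debugreg)
 *	set_debugreg(0, 7);	(clear DR7, disabling hardware breakpoints)
 */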

static inline void clts(void)
{
	PVOP_VCALL0(clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(write_cr4, x);
}

static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(safe_halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(wbinvd);
}

#define get_kernel_rpl() (paravirt_ops.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {			\
	int _err;					\
	u64 _l = paravirt_read_msr(msr, &_err);		\
	val1 = (u32)_l;					\
	val2 = _l >> 32;				\
} while(0)

#define wrmsr(msr,val1,val2) do {			\
	paravirt_write_msr(msr, val1, val2);		\
} while(0)

#define rdmsrl(msr,val) do {				\
	int _err;					\
	val = paravirt_read_msr(msr, &_err);		\
} while(0)

#define wrmsrl(msr,val) ((void)paravirt_write_msr(msr, val, 0))
#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({				\
	int _err;					\
	u64 _l = paravirt_read_msr(msr, &_err);		\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err; })
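
/*
 * Example (illustrative only; MSR_EXAMPLE stands in for a real MSR number):
 *
 *	u32 lo, hi;
 *	int err = rdmsr_safe(MSR_EXAMPLE, &lo, &hi);	(0 or -EFAULT)
 *
 *	if (!err)
 *		wrmsr(MSR_EXAMPLE, lo, hi);
 */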

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, read_tsc);
}

#define rdtsc(low,high) do {				\
	u64 _l = paravirt_read_tsc();			\
	low = (u32)_l;					\
	high = _l >> 32;				\
} while(0)

#define rdtscl(low) do {				\
	u64 _l = paravirt_read_tsc();			\
	low = (int)_l;					\
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, read_pmc, counter);
}

#define rdpmc(counter,low,high) do {			\
	u64 _l = paravirt_read_pmc(counter);		\
	low = (u32)_l;					\
	high = _l >> 32;				\
} while(0)

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(load_tr_desc);
}
static inline void load_gdt(const struct Xgt_desc_struct *dtr)
{
	PVOP_VCALL1(load_gdt, dtr);
}
static inline void load_idt(const struct Xgt_desc_struct *dtr)
{
	PVOP_VCALL1(load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(set_ldt, addr, entries);
}
static inline void store_gdt(struct Xgt_desc_struct *dtr)
{
	PVOP_VCALL1(store_gdt, dtr);
}
static inline void store_idt(struct Xgt_desc_struct *dtr)
{
	PVOP_VCALL1(store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, store_tr);
}
#define store_tr(tr) ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(load_tls, t, cpu);
}
static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
{
	PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
}
static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
{
	PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
}
static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
{
	PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	paravirt_ops.io_delay();
#ifdef REALLY_SLOW_IO
	paravirt_ops.io_delay();
	paravirt_ops.io_delay();
	paravirt_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, unsigned long v)
{
	PVOP_VCALL2(apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, unsigned long v)
{
	PVOP_VCALL2(apic_write_atomic, reg, v);
}

static inline unsigned long apic_read(unsigned long reg)
{
	return PVOP_CALL1(unsigned long, apic_read, reg);
}

static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(setup_secondary_clock);
}
#endif

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	if (paravirt_ops.pagetable_setup_start)
		(*paravirt_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	if (paravirt_ops.pagetable_setup_done)
		(*paravirt_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(flush_tlb_single, addr);
}

static inline void paravirt_map_pt_hook(int type, pte_t *va, u32 pfn)
{
	PVOP_VCALL3(map_pt_hook, type, va, pfn);
}

static inline void paravirt_alloc_pt(unsigned pfn)
{
	PVOP_VCALL1(alloc_pt, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
	PVOP_VCALL1(release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
	PVOP_VCALL1(alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
					   unsigned start, unsigned count)
{
	PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
	PVOP_VCALL1(release_pd, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
}

#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
	unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
					    val, val >> 32);
	return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
	return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
	return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
	return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
	return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
	return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	/* 5 arg words */
	paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	paravirt_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
	PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	PVOP_VCALL3(pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pmd_clear, pmdp);
}

static inline pte_t raw_ptep_get_and_clear(pte_t *p)
{
	unsigned long long val = PVOP_CALL1(unsigned long long, ptep_get_and_clear, p);
	return (pte_t) { val, val >> 32 };
}
#else  /* !CONFIG_X86_PAE */
static inline pte_t __pte(unsigned long val)
{
	return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
	return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
	return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
	return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
}

static inline pte_t raw_ptep_get_and_clear(pte_t *p)
{
	return (pte_t) { PVOP_CALL1(unsigned long, ptep_get_and_clear, p) };
}
#endif /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
#define PARAVIRT_LAZY_NONE 0
#define PARAVIRT_LAZY_MMU 1
#define PARAVIRT_LAZY_CPU 2
#define PARAVIRT_LAZY_FLUSH 3

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}
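
/*
 * Example (illustrative only): batching a run of page-table updates so a
 * hypervisor backend can queue them and flush once.  mm, addr, ptep,
 * newpte and HOW_MANY are assumed to be set up by the caller.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < HOW_MANY; i++)
 *		set_pte_at(mm, addr + i*PAGE_SIZE, ptep + i, newpte[i]);
 *	arch_leave_lazy_mmu_mode();
 */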

void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
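
/*
 * Sketch (illustrative, not the in-tree patching loop): generic code walks
 * this table, lets paravirt_ops.patch() emit replacement code at each site,
 * and nop-pads whatever remains of the original length.
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used = paravirt_ops.patch(p->instrtype, p->clobbers,
 *						   p->instr, p->len);
 *		(pad p->instr[used .. p->len) with nops)
 *	}
 */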

static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
				  PARAVIRT_CALL
				  "popl %%edx; popl %%ecx")
		     : "=a"(f)
		     : paravirt_type(save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc");
	return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
				  PARAVIRT_CALL
				  "popl %%edx; popl %%ecx")
		     : "=a"(f)
		     : "0"(f),
		       paravirt_type(restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
				  PARAVIRT_CALL
				  "popl %%edx; popl %%ecx")
		     :
		     : paravirt_type(irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
				  PARAVIRT_CALL
				  "popl %%edx; popl %%ecx")
		     :
		     : paravirt_type(irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}

#define CLI_STRING							\
	_paravirt_alt("pushl %%ecx; pushl %%edx;"			\
		      "call *paravirt_ops+%c[paravirt_cli_type]*4;"	\
		      "popl %%edx; popl %%ecx",				\
		      "%c[paravirt_cli_type]", "%c[paravirt_clobber]")

#define STI_STRING							\
	_paravirt_alt("pushl %%ecx; pushl %%edx;"			\
		      "call *paravirt_ops+%c[paravirt_sti_type]*4;"	\
		      "popl %%edx; popl %%ecx",				\
		      "%c[paravirt_sti_type]", "%c[paravirt_clobber]")

#define CLI_STI_CLOBBERS , "%eax"
#define CLI_STI_INPUT_ARGS						\
	,								\
	[paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)),		\
	[paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)),		\
	paravirt_clobber(CLBR_EAX)

#undef PARAVIRT_CALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define PARA_PATCH(off) ((off) / 4)

#define PARA_SITE(ptype, clobbers, ops)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .long 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection

#define INTERRUPT_RETURN					\
	PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,		\
		  jmp *%cs:paravirt_ops+PARAVIRT_iret)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,		\
		  pushl %eax; pushl %ecx; pushl %edx;			\
		  call *%cs:paravirt_ops+PARAVIRT_irq_disable;		\
		  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,		\
		  pushl %eax; pushl %ecx; pushl %edx;			\
		  call *%cs:paravirt_ops+PARAVIRT_irq_enable;		\
		  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,	\
		  jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)

#define GET_CR0_INTO_EAX			\
	push %ecx; push %edx;			\
	call *paravirt_ops+PARAVIRT_read_cr0;	\
	pop %edx; pop %ecx

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */