#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
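
/*
 * Usage sketch (illustrative only, not part of the original header):
 * *eax selects the leaf on input, and all four words are overwritten
 * with the results, dispatched through pv_cpu_ops.cpuid rather than a
 * raw CPUID instruction:
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	// ebx, edx, ecx now hold the 12-byte vendor string,
 *	// e.g. "GenuineIntel"; eax holds the maximum leaf.
 */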
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()	(pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
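
/*
 * Usage sketch (illustrative only; MSR_EFER comes from
 * <asm/msr-index.h>, not this header).  The macros split and rejoin
 * the 64-bit MSR value around the 32-bit low/high halves used by the
 * hardware interface:
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_EFER, lo, hi);	// lo = bits 0-31, hi = bits 32-63
 *	wrmsr(MSR_EFER, lo, hi);	// write it back unchanged
 *
 *	if (rdmsrl_safe(MSR_EFER, &val))	// non-zero: the read faulted
 *		val = 0;
 */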
static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long long __val = paravirt_rdtscp(&__aux); \
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}
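
/*
 * The type converters below all follow one pattern: when pteval_t (or
 * pmdval_t/pudval_t/pgdval_t) is wider than a machine word -- 32-bit
 * PAE, where page-table entries are 64 bits -- the value must be split
 * into two 32-bit arguments for the pvop call, hence the sizeof()
 * test and the "(u64)val >> 32" second argument.  Otherwise the value
 * fits in a single register and the one-argument form is used.  The
 * sizeof() comparison is a compile-time constant, so the compiler
 * discards the unused branch.
 */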
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
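
/*
 * Paravirtualized ticket-lock hooks: lock_spinning is invoked from the
 * slow path once a ticket has spun for long enough, so the hypervisor
 * can block the vCPU instead of letting it burn cycles; unlock_kick
 * tells the hypervisor to wake whichever vCPU is waiting on "ticket".
 */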
static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
/* We save only some registers; saving all of them would be too much.
 * Instead we declare all of the caller-saved registers as clobbered,
 * except for the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
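
/*
 * Usage sketch (illustrative; "hv_save_fl" is a hypothetical backend
 * function, not defined in this header).  A hypervisor backend wraps a
 * plain C implementation in a thunk and installs the callee-save
 * reference in its ops table:
 *
 *	static unsigned long hv_save_fl(void)
 *	{
 *		return 0;	// report the guest's virtual IF flag
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(hv_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(hv_save_fl);
 *
 * PVOP_CALLEE* call sites may then assume hv_save_fl preserves every
 * scratch register except the one holding the return value.
 */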
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
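
/*
 * These back the generic local_irq_save()/local_irq_restore() helpers;
 * the usual pattern (illustrative only) is:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// save state, then disable
 *	// ... critical section ...
 *	arch_local_irq_restore(flags);	// re-enable only if it was enabled
 */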
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */
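
/*
 * _PVSITE emits "ops" between the local labels 771 and 772 and records
 * an entry (site address, patch type, site length, clobber mask) in the
 * .parainstructions section.  At boot, apply_paravirt() walks that
 * section and may rewrite each site with a better (often inlined)
 * sequence for the hypervisor actually running.
 */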
#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)
/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */