#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}
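
/*
 * Illustrative sketch (not part of this header): callers use
 * paravirt_enabled() to branch on whether a hypervisor has replaced
 * the native operations, e.g.:
 *
 *	if (paravirt_enabled())
 *		pr_info("running paravirtualized\n");
 */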
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
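
/*
 * Illustrative sketch (not part of this header): reading and writing a
 * debug register through the paravirt hooks; the bit set here is just
 * an example (DR7 bit 0 locally enables breakpoint 0):
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7 | 0x1, 7);
 */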
static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
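
/*
 * Illustrative sketch (not part of this header): probing an MSR that
 * may not exist, using the exception-handling variant so a #GP comes
 * back as an error code instead of an oops:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_EFER, &val))
 *		pr_warn("MSR_EFER not readable\n");
 */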
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;	/* magic key AMD expects in %edi for these accesses */

	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;	/* same AMD magic key as above */

	return paravirt_wrmsr_regs(gprs);
}
static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
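
/*
 * Illustrative sketch (not part of this header): sampling the TSC
 * through the paravirt hook, which a hypervisor may intercept or
 * scale (do_something() is a stand-in for the code being timed):
 *
 *	unsigned long long t0, t1;
 *
 *	rdtscll(t0);
 *	do_something();
 *	rdtscll(t1);
 *	pr_debug("elapsed cycles: %llu\n", t1 - t0);
 */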
static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long long __val = paravirt_rdtscp(&__aux); \
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)			\
do {						\
	unsigned int __aux;			\
	val = paravirt_rdtscp(&__aux);		\
	(aux) = __aux;				\
} while (0)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}

static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}
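
/*
 * Illustrative sketch (not part of this header): __pte()/pte_val()
 * wrap and unwrap the raw value, giving a hypervisor the chance to
 * translate page-frame numbers on the way through (pfn below is a
 * stand-in for a caller-provided frame number):
 *
 *	pte_t pte = __pte((pfn << PAGE_SHIFT) | _PAGE_PRESENT);
 *	pteval_t raw = pte_val(pte);	// round-trips through the pvops
 */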
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */
#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
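
/*
 * Illustrative sketch (not part of this header): batching page-table
 * updates under lazy MMU mode so a hypervisor can coalesce them into
 * fewer hypercalls (mm, addr, ptep, ptes, n and i are stand-ins for
 * caller state):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < n; i++)
 *		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
 *	arch_leave_lazy_mmu_mode();
 */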
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						 unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS					\
	"push %rcx;"						\
	"push %rdx;"						\
	"push %rsi;"						\
	"push %rdi;"						\
	"push %r8;"						\
	"push %r9;"						\
	"push %r10;"						\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS				\
	"pop %r11;"						\
	"pop %r10;"						\
	"pop %r9;"						\
	"pop %r8;"						\
	"pop %rdi;"						\
	"pop %rsi;"						\
	"pop %rdx;"						\
	"pop %rcx;"

/* Saving all registers here would be too much: we save only %rdi and
 * declare the remaining caller-saved registers, except the argument
 * register, as clobbers. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
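
/*
 * Illustrative sketch (not part of this header): a backend wraps its C
 * implementation in a thunk and hands the result to the ops table
 * (my_save_fl is a hypothetical backend function):
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */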
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
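
/*
 * Illustrative sketch (not part of this header): the classic
 * save/disable/restore pattern built from the hooks above:
 *
 *	unsigned long flags = arch_local_irq_save();
 *	// ...critical section, interrupts off...
 *	arch_local_irq_restore(flags);
 */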
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);
#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short	clobbers;				\
	.popsection
#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
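
/*
 * Illustrative sketch (not part of this header): entry code uses these
 * macros where the native instruction would otherwise sit, e.g.
 *
 *	DISABLE_INTERRUPTS(CLBR_NONE)
 *
 * which the patcher can collapse to a plain "cli" on bare hardware.
 */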
#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )
#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */