#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
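
/*
 * Example (illustrative sketch, not part of the original header): query
 * CPUID leaf 1 through the paravirt hook.  The leaf number is passed in
 * via *eax, and all four output registers come back through the pointers:
 *
 *      unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *      __cpuid(&eax, &ebx, &ecx, &edx);
 *      // eax now holds the family/model/stepping signature
 */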
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
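
/*
 * Example (illustrative sketch, not part of the original header): a
 * read-modify-write of %cr4 goes through the paired accessors, so a
 * hypervisor sees both halves of the update:
 *
 *      unsigned long cr4 = read_cr4();
 *      write_cr4(cr4 | X86_CR4_PGE);
 */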

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        /* Note: halt() is routed to the safe_halt hook as well. */
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr(msr, &_err);            \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})
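
/*
 * Example (illustrative sketch, not part of the original header): the
 * _safe variants report a faulting access as an error code instead of
 * oopsing, so callers can probe for MSRs that may not exist:
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi) == 0)
 *              pr_info("platform id: %08x%08x\n", hi, lo);
 */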

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;   /* AMD "password" for special MSR access */

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;   /* AMD "password" for special MSR access */

        return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
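
/*
 * Example (illustrative sketch, not part of the original header;
 * do_something() is a hypothetical placeholder):
 *
 *      unsigned long long t0, t1;
 *      rdtscll(t0);
 *      do_something();
 *      rdtscll(t1);
 *      // t1 - t0 is the elapsed cycle count (modulo the usual TSC caveats)
 */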

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                                  \
do {                                                            \
        /* type matches paravirt_rdtscp()'s aux parameter */    \
        unsigned int __aux;                                     \
        unsigned long long __val = paravirt_rdtscp(&__aux);     \
        (low) = (u32)__val;                                     \
        (high) = (u32)(__val >> 32);                            \
        (aux) = __aux;                                          \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}

static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}
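
/*
 * Example (illustrative sketch, not part of the original header): the
 * wrap/unwrap pairs round-trip a raw value through the hypervisor's
 * representation.  On 32-bit PAE a pteval_t is 64 bits while long is 32,
 * so the value is split across two argument registers; on 64-bit it
 * travels in one:
 *
 *      pte_t pte = __pte((pfn << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
 *      pteval_t raw = pte_val(pte);    // recovers the value passed in
 */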

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
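
/*
 * Example (illustrative sketch, not part of the original header; mm, vma,
 * address, ptep and page are assumed from the surrounding fault path):
 * generic mm code installs a pte through set_pte_at() so the mm and
 * address reach the hypervisor along with the new value:
 *
 *      set_pte_at(mm, address, ptep, mk_pte(page, vma->vm_page_prot));
 */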

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
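
/*
 * Example (illustrative sketch, not part of the original header; mm,
 * ptep, start and end are assumed from the caller): a batch of
 * page-table updates can be bracketed by the lazy-mmu hooks so a
 * hypervisor may queue them and flush once at leave time:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *              set_pte_at(mm, addr, ptep, __pte(0));
 *      arch_leave_lazy_mmu_mode();
 */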

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                 \
        "push %rcx;"                            \
        "push %rdx;"                            \
        "push %rsi;"                            \
        "push %rdi;"                            \
        "push %r8;"                             \
        "push %r9;"                             \
        "push %r10;"                            \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS              \
        "pop %r11;"                             \
        "pop %r10;"                             \
        "pop %r9;"                              \
        "pop %r8;"                              \
        "pop %rdi;"                             \
        "pop %rsi;"                             \
        "pop %rdx;"                             \
        "pop %rcx;"

/* Saving all registers would be too much, so we clobber all
 * caller-saved registers except for the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                         \
        extern typeof(func) __raw_callee_save_##func;           \
        static void *__##func##__ __used = func;                \
                                                                \
        asm(".pushsection .text;"                               \
            "__raw_callee_save_" #func ": "                     \
            PV_SAVE_ALL_CALLER_REGS                             \
            "call " #func ";"                                   \
            PV_RESTORE_ALL_CALLER_REGS                          \
            "ret;"                                              \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                    \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
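
/*
 * Example (illustrative sketch, not part of the original header;
 * "my_save_fl" is a hypothetical backend function).  The thunk macro
 * emits __raw_callee_save_my_save_fl, which preserves every caller-save
 * register around the C call:
 *
 *      static unsigned long my_save_fl(void)
 *      {
 *              return 0;       // backend-specific flags state
 *      }
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *      // when populating pv_irq_ops:
 *      //      .save_fl = PV_CALLEE_SAVE(my_save_fl),
 */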

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
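
/*
 * Example (illustrative sketch, not part of the original header): the
 * save/disable and restore calls pair up to protect a critical section
 * without assuming interrupts were enabled on entry:
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      // ... interrupts are off here ...
 *      raw_local_irq_restore(flags);
 */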

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
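
/*
 * Illustrative note (not part of the original header): each use of
 * _PVSITE emits the "ops" instructions bracketed by local labels 771/772
 * and records an entry for them in the .parainstructions section: the
 * site's address (word 771b), the op-type byte, the site's length in
 * bytes (772b-771b) and the clobber mask.  apply_paravirt() walks these
 * records at boot and patches each site.
 */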

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
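
/*
 * Illustrative usage (not part of the original header): entry code
 * invokes these macros with a mask naming the registers the call site
 * can afford to have clobbered, e.g.:
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 */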

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),      \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */