paravirt.h

#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
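
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * the get/set pair above reads like ordinary assignment.  Here %db7 is
 * saved, temporarily replaced, and restored; the "example_" name is
 * hypothetical.
 */
static inline void example_with_dr7(unsigned long tmp_dr7)
{
	unsigned long saved;

	get_debugreg(saved, 7);		/* saved = current %db7 */
	set_debugreg(tmp_dr7, 7);	/* install the temporary value */
	set_debugreg(saved, 7);		/* restore the original */
}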
static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
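
/*
 * Usage sketch (editorial, not part of the original header): rdmsr_safe()
 * is a statement expression that evaluates to the fault status, so a
 * caller can probe an MSR that may not exist.  "example_probe_msr" is a
 * hypothetical helper.
 */
static inline int example_probe_msr(unsigned msr, u64 *out)
{
	u32 lo, hi;
	int err = rdmsr_safe(msr, &lo, &hi);	/* 0 on success */

	*out = lo | ((u64)hi << 32);
	return err;
}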
#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;
	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct jump_label_key;
extern struct jump_label_key paravirt_steal_enabled;
extern struct jump_label_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}
#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)
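
/*
 * Usage sketch (editorial): rdtscll() expands to a plain assignment, so
 * timing a region is just two reads.  "example_cycles" is hypothetical.
 */
static inline unsigned long long example_cycles(void)
{
	unsigned long long t0, t1;

	rdtscll(t0);
	/* ... work to be timed would go here ... */
	rdtscll(t1);

	return t1 - t0;
}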
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}

static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}
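
/*
 * Usage sketch (editorial): __pte() and pte_val() are conversion hooks that
 * under the native ops should be inverses, so a round trip preserves the
 * raw value.  The split high/low words above are the 64-bit-pte-on-32-bit
 * case.  "example_pte_roundtrip" is hypothetical.
 */
static inline bool example_pte_roundtrip(pteval_t val)
{
	pte_t pte = __pte(val);		/* raw value -> pte_t */

	return pte_val(pte) == val;	/* pte_t -> raw value */
}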
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						 unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
/* We save some registers, but saving all of them would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
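
/*
 * Usage sketch (editorial; "my_save_fl" is a hypothetical backend
 * function).  A hypervisor backend wraps its C implementation in the
 * register-preserving thunk, then points the pvop at the thunk:
 *
 *	static unsigned long my_save_fl(void)
 *	{
 *		...
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */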
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
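
/*
 * Usage sketch (editorial): the save/disable/restore pattern these helpers
 * implement.  "example_critical" is hypothetical; real code would go
 * through the generic local_irq_save()/local_irq_restore() wrappers.
 */
static inline void example_critical(void)
{
	unsigned long flags = arch_local_irq_save();	/* save + disable */

	/* ... code that must run with interrupts off ... */

	arch_local_irq_restore(flags);	/* back to the previous state */
}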
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)
/*
 * Note: swapgs is very special; in practice it is either implemented
 * as a single "swapgs" instruction or as something equally minimal.
 * Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */