#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
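/*
 * Illustrative sketch, not part of the original header: one way a caller
 * might use the paravirtualized __cpuid().  Leaf 0 (passed in via *eax)
 * reports the maximum standard leaf in EAX and the vendor string in
 * EBX/EDX/ECX.  The helper name example_cpu_vendor is hypothetical, and
 * memcpy() assumes <linux/string.h>.
 */
#if 0   /* example only */
static void example_cpu_vendor(char vendor[13])
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        __cpuid(&eax, &ebx, &ecx, &edx);        /* leaf 0 on input in EAX */
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
}
#endif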
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr(msr, &_err);            \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})
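/*
 * Illustrative sketch, not part of the original header: typical use of
 * rdmsr_safe() to probe an MSR that may not exist on all CPUs.  The
 * function name and the choice of MSR_IA32_PLATFORM_ID are assumptions
 * for illustration only.
 */
#if 0   /* example only */
static int example_probe_msr(void)
{
        u32 lo, hi;

        if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
                return -EIO;    /* read faulted: MSR not implemented */

        return 0;
}
#endif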
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_amd(msr, &err);
        return err;
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                                     \
do {                                                    \
        u64 _l = paravirt_read_tsc();                   \
        low = (int)_l;                                  \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = paravirt_read_pmc(counter);            \
        low = (u32)_l;                                  \
        high = _l >> 32;                                \
} while (0)
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */
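/*
 * Illustrative sketch, not part of the original header: why the PAE
 * special cases above exist.  A PAE pte is 64 bits wide but a 32-bit
 * store updates only half of it, so a pte that another CPU's page
 * walker may be reading concurrently has to be replaced in one shot
 * (the native backend does this with cmpxchg8b via set_64bit).  The
 * helper name example_update_live_pte is hypothetical.
 */
#if 0   /* example only */
static void example_update_live_pte(pte_t *ptep, pte_t new)
{
        /* A plain set_pte() writes the two 32-bit halves separately and
         * could expose a half-updated pte; set_pte_atomic() must be used
         * while the mapping may still be in use. */
        set_pte_atomic(ptep, new);
}
#endif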
#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                 \
        "push %rcx;"                            \
        "push %rdx;"                            \
        "push %rsi;"                            \
        "push %rdi;"                            \
        "push %r8;"                             \
        "push %r9;"                             \
        "push %r10;"                            \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS              \
        "pop %r11;"                             \
        "pop %r10;"                             \
        "pop %r9;"                              \
        "pop %r8;"                              \
        "pop %rdi;"                             \
        "pop %rsi;"                             \
        "pop %rdx;"                             \
        "pop %rcx;"
/* We could save all registers here, but that would be too much: instead,
 * clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                 \
        extern typeof(func) __raw_callee_save_##func;   \
        static void *__##func##__ __used = func;        \
                                                        \
        asm(".pushsection .text;"                       \
            "__raw_callee_save_" #func ": "             \
            PV_SAVE_ALL_CALLER_REGS                     \
            "call " #func ";"                           \
            PV_RESTORE_ALL_CALLER_REGS                  \
            "ret;"                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
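/*
 * Illustrative sketch, not part of the original header: how a pv backend
 * might wrap a C function with PV_CALLEE_SAVE_REGS_THUNK and install the
 * thunked entry point.  The names example_save_fl and example_irq_ops are
 * hypothetical; native_save_fl() assumes <asm/irqflags.h>.
 */
#if 0   /* example only */
static unsigned long example_save_fl(void)
{
        return native_save_fl();
}
PV_CALLEE_SAVE_REGS_THUNK(example_save_fl);

/* Callers dispatching through .save_fl may now treat all argument and
 * scratch registers as preserved across the call. */
static const struct pv_irq_ops example_irq_ops = {
        .save_fl = PV_CALLEE_SAVE(example_save_fl),
};
#endif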
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);
#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)             *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)             *%cs:addr
#endif
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)
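/*
 * Illustrative sketch, not part of the original header: entry_64.S-style
 * use of SWAPGS_UNSAFE_STACK at syscall entry, while %rsp still points
 * at the user stack (the entry label below is hypothetical):
 *
 *      ENTRY(example_syscall_entry)
 *              SWAPGS_UNSAFE_STACK     # no kernel stack yet: must be inline
 *              movq    %rsp, PER_CPU_VAR(old_rsp)
 *              movq    PER_CPU_VAR(kernel_stack), %rsp
 *              ...
 */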
/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                  \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                             \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */