vmi_32.c

/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
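
/*
 * regparm(N) makes gcc pass the first N arguments in registers
 * (%eax, then %edx) instead of on the stack, which is the calling
 * convention the VMI ROM entry points expect; the u64 result of a
 * VROMLONGFUNC comes back in %edx:%eax as usual for i386.
 */
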
#define call_vrom_func(rom,func) \
        (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
        (((VROMLONGFUNC *)(rom->func))(arg))

static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;

/* Cached VMI operations */
static struct {
        void (*cpuid)(void /* non-c */);
        void (*_set_ldt)(u32 selector);
        void (*set_tr)(u32 selector);
        void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
        void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
        void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
        void (*set_kernel_stack)(u32 selector, u32 sp0);
        void (*allocate_page)(u32, u32, u32, u32, u32);
        void (*release_page)(u32, u32);
        void (*set_pte)(pte_t, pte_t *, unsigned);
        void (*update_pte)(pte_t *, unsigned);
        void (*set_linear_mapping)(int, void *, u32, u32);
        void (*_flush_tlb)(int);
        void (*set_initial_ap_state)(int, int);
        void (*halt)(void);
        void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

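/*
 * A near CALL/JMP on x86 is one opcode byte followed by a 32-bit
 * displacement relative to the end of the 5-byte instruction, hence
 * the dest - ip - 5 arithmetic below.
 */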
static inline void patch_offset(void *insnbuf,
                                unsigned long ip, unsigned long dest)
{
        *(unsigned long *)(insnbuf+1) = dest-ip-5;
}

static unsigned patch_internal(int call, unsigned len, void *insnbuf,
                               unsigned long ip)
{
        u64 reloc;
        struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;

        reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
        switch (rel->type) {
        case VMI_RELOCATION_CALL_REL:
                BUG_ON(len < 5);
                *(char *)insnbuf = MNEM_CALL;
                patch_offset(insnbuf, ip, (unsigned long)rel->eip);
                return 5;

        case VMI_RELOCATION_JUMP_REL:
                BUG_ON(len < 5);
                *(char *)insnbuf = MNEM_JMP;
                patch_offset(insnbuf, ip, (unsigned long)rel->eip);
                return 5;

        case VMI_RELOCATION_NOP:
                /* obliterate the whole thing */
                return 0;

        case VMI_RELOCATION_NONE:
                /* leave native code in place */
                break;

        default:
                BUG();
        }
        return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence. The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
                          unsigned long ip, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
                return patch_internal(VMI_CALL_DisableInterrupts, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
                return patch_internal(VMI_CALL_EnableInterrupts, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return patch_internal(VMI_CALL_SetInterruptMask, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
                return patch_internal(VMI_CALL_GetInterruptMask, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_cpu_ops.iret):
                return patch_internal(VMI_CALL_IRET, len, insns, ip);
        case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
                return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
        default:
                break;
        }
        return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
        int override = 0;
        if (*ax == 1)
                override = 1;
        asm volatile ("call *%6"
                : "=a" (*ax),
                  "=b" (*bx),
                  "=c" (*cx),
                  "=d" (*dx)
                : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
        if (override) {
                /* The X86_FEATURE_* macros are bit numbers, not masks */
                if (disable_pse)
                        *dx &= ~(1 << X86_FEATURE_PSE);
                if (disable_pge)
                        *dx &= ~(1 << X86_FEATURE_PGE);
                if (disable_sep)
                        *dx &= ~(1 << X86_FEATURE_SEP);
                if (disable_tsc)
                        *dx &= ~(1 << X86_FEATURE_TSC);
                if (disable_mtrr)
                        *dx &= ~(1 << X86_FEATURE_MTRR);
        }
}

static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
        if (gdt[nr].a != new->a || gdt[nr].b != new->b)
                write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
        unsigned cpu = smp_processor_id();
        struct desc_struct desc;

        pack_descriptor(&desc, (unsigned long)addr,
                        entries * sizeof(struct desc_struct) - 1,
                        DESC_LDT, 0);
        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
        vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
        vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}
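
/*
 * The cached VMI entry points take a descriptor as two raw 32-bit
 * words, so each write_*_entry wrapper below just splits the 8-byte
 * descriptor into its low and high halves.
 */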
static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        u32 *idt_entry = (u32 *)g;
        vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
                                const void *desc, int type)
{
        u32 *gdt_entry = (u32 *)desc;
        vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
                                const void *desc)
{
        u32 *ldt_entry = (u32 *)desc;
        vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}

static void vmi_load_sp0(struct tss_struct *tss,
                         struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;

        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
        vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}

static void vmi_flush_tlb_user(void)
{
        vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
        vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
        /*
         * This call comes in very early, before mem_map is set up.
         * It is called only for swapper_pg_dir, which already has
         * data on it.
         */
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pte(unsigned long pfn)
{
        vmi_ops.release_page(pfn, VMI_PAGE_L1);
}

static void vmi_release_pmd(unsigned long pfn)
{
        vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * We use the pgd_free hook for releasing the pgd page:
 */
static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

        vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * Helper macros for MMU update flags. We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush). We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs. We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
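
/*
 * For example, a PTE update at a user address in the current mm expands
 * to VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK); the
 * _defer variant additionally sets VMI_PAGE_DEFER so the hypervisor may
 * batch the shadow page table update until the next flush.
 */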
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
        /* XXX because of set_pmd_pte, this can be called on PT or PD layers */
        vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
        const pte_t pte = { .pte = pmdval.pmd };
#else
        const pte_t pte = { pmdval.pud.pgd.pgd };
#endif
        vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        /*
         * XXX This is called from set_pmd_pte, but at both PT
         * and PD layers so the VMI_PAGE_PT flag is wrong. But
         * it is only called for large page mapping changes,
         * the Xen backend doesn't support large pages, and the
         * ESX backend doesn't depend on the flag.
         */
        set_64bit((unsigned long long *)ptep, pte_val(pteval));
        vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
        /* Um, eww */
        const pte_t pte = { .pte = pudval.pgd.pgd };
        vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        const pte_t pte = { .pte = 0 };
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
        const pte_t pte = { .pte = 0 };
        vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                     unsigned long start_esp)
{
        struct vmi_ap_state ap;

        /* Default everything to zero. This is fine for most GPRs. */
        memset(&ap, 0, sizeof(struct vmi_ap_state));

        ap.gdtr_limit = GDT_SIZE - 1;
        ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

        ap.idtr_limit = IDT_ENTRIES * 8 - 1;
        ap.idtr_base = (unsigned long) idt_table;

        ap.ldtr = 0;

        ap.cs = __KERNEL_CS;
        ap.eip = (unsigned long) start_eip;
        ap.ss = __KERNEL_DS;
        ap.esp = (unsigned long) start_esp;

        ap.ds = __USER_DS;
        ap.es = __USER_DS;
        ap.fs = __KERNEL_PERCPU;
        ap.gs = __KERNEL_STACK_CANARY;

        ap.eflags = 0;

#ifdef CONFIG_X86_PAE
        /* efer should match BSP efer. */
        if (cpu_has_nx) {
                unsigned l, h;
                rdmsr(MSR_EFER, l, h);
                ap.efer = (unsigned long long) h << 32 | l;
        }
#endif

        ap.cr3 = __pa(swapper_pg_dir);
        /* Protected mode, paging, AM, WP, NE, MP. */
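        /* i.e. CR0.PE|MP|NE|WP|AM|PG == 0x80050023 */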
        ap.cr0 = 0x80050023;
        ap.cr4 = mmu_cr4_features;
        vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
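
/*
 * The literal lazy-mode arguments below follow the paravirt lazy-mode
 * numbering: 0 == PARAVIRT_LAZY_NONE, 1 == PARAVIRT_LAZY_MMU,
 * 2 == PARAVIRT_LAZY_CPU.
 */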
static void vmi_start_context_switch(struct task_struct *prev)
{
        paravirt_start_context_switch(prev);
        vmi_ops.set_lazy_mode(2);
}

static void vmi_end_context_switch(struct task_struct *next)
{
        vmi_ops.set_lazy_mode(0);
        paravirt_end_context_switch(next);
}

static void vmi_enter_lazy_mmu(void)
{
        paravirt_enter_lazy_mmu();
        vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy_mmu(void)
{
        vmi_ops.set_lazy_mode(0);
        paravirt_leave_lazy_mmu();
}

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
        struct pci_header *pci;
        struct pnp_header *pnp;
        const char *manufacturer = "UNKNOWN";
        const char *product = "UNKNOWN";
        const char *license = "unspecified";

        if (rom->rom_signature != 0xaa55)
                return 0;
        if (rom->vrom_signature != VMI_SIGNATURE)
                return 0;
        if (rom->api_version_maj != VMI_API_REV_MAJOR ||
            rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
                printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
                       rom->api_version_maj,
                       rom->api_version_min);
                return 0;
        }

        /*
         * Relying on the VMI_SIGNATURE field is not 100% safe, so check
         * the PCI header and device type to make sure this is really a
         * VMI device.
         */
        if (!rom->pci_header_offs) {
                printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
                return 0;
        }

        pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
        if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
            pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
                /* Allow it to run anyway, but warn */
                printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
        }

        if (rom->pnp_header_offs) {
                pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
                if (pnp->manufacturer_offset)
                        manufacturer = (const char *)rom+pnp->manufacturer_offset;
                if (pnp->product_offset)
                        product = (const char *)rom+pnp->product_offset;
        }

        if (rom->license_offs)
                license = (char *)rom+rom->license_offs;

        printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
               manufacturer, product,
               rom->api_version_maj, rom->api_version_min,
               pci->rom_version_maj, pci->rom_version_min);

        /*
         * Don't allow BSD/MIT here for now because we don't want to end up
         * with any binary-only shim layers.
         */
        if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
                printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
                       license);
                return 0;
        }

        return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
        unsigned long base;

        /* VMI ROM is in option ROM area, check signature */
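        /* PC option ROMs sit on 2 KB boundaries in the 0xC0000-0xDFFFF window */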
        for (base = 0xC0000; base < 0xE0000; base += 2048) {
                struct vrom_header *romstart;
                romstart = (struct vrom_header *)isa_bus_to_virt(base);
                if (check_vmi_rom(romstart)) {
                        vmi_rom = romstart;
                        return 1;
                }
        }
        return 0;
}

/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
        /* We must establish the lowmem mapping for MMU ops to work */
        if (vmi_ops.set_linear_mapping)
                vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
        u64 reloc;
        const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

        reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
        BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
        if (rel->type == VMI_RELOCATION_CALL_REL)
                return (void *)rel->eip;
        else
                return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)                                \
do {                                                              \
        reloc = call_vrom_long_func(vmi_rom, get_reloc,           \
                                    VMI_CALL_##vmicall);          \
        if (rel->type == VMI_RELOCATION_CALL_REL)                 \
                opname = (void *)rel->eip;                        \
        else if (rel->type == VMI_RELOCATION_NOP)                 \
                opname = (void *)vmi_nop;                         \
        else if (rel->type != VMI_RELOCATION_NONE)                \
                printk(KERN_WARNING "VMI: Unknown relocation "    \
                       "type %d for " #vmicall"\n",               \
                       rel->type);                                \
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub. Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)                \
do {                                                              \
        reloc = call_vrom_long_func(vmi_rom, get_reloc,           \
                                    VMI_CALL_##vmicall);          \
        BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);             \
        if (rel->type == VMI_RELOCATION_CALL_REL) {               \
                opname = wrapper;                                 \
                vmi_ops.cache = (void *)rel->eip;                 \
        }                                                         \
} while (0)
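
/*
 * For example, para_fill(pv_cpu_ops.clts, CLTS) asks the ROM for the
 * VMI_CALL_CLTS relocation and points pv_cpu_ops.clts straight at the
 * ROM entry (or at vmi_nop if the ROM says the call is a no-op), while
 * para_wrap() installs a C wrapper and only caches the ROM entry.
 */
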
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
        short kernel_cs;
        u64 reloc;
        const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

        /*
         * Prevent page tables from being allocated in highmem, even if
         * CONFIG_HIGHPTE is enabled.
         */
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;

        if (call_vrom_func(vmi_rom, vmi_init) != 0) {
                printk(KERN_ERR "VMI ROM failed to initialize!\n");
                return 0;
        }
        savesegment(cs, kernel_cs);

        pv_info.paravirt_enabled = 1;
        pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
        pv_info.name = "vmi [deprecated]";

        pv_init_ops.patch = vmi_patch;

        /*
         * Many of these operations are ABI compatible with VMI.
         * This means we can fill in the paravirt-ops with direct
         * pointers into the VMI ROM. If the calling convention for
         * these operations changes, this code needs to be updated.
         *
         * Exceptions
         *  CPUID paravirt-op uses pointers, not the native ISA
         *  halt has no VMI equivalent; all VMI halts are "safe"
         *  no MSR support yet - just trap and emulate. VMI uses the
         *    same ABI as the native ISA, but Linux wants exceptions
         *    from bogus MSR read / write handled
         *  rdpmc is not yet used in Linux
         */

        /* CPUID is special, so very special it gets wrapped like a present */
        para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

        para_fill(pv_cpu_ops.clts, CLTS);
        para_fill(pv_cpu_ops.get_debugreg, GetDR);
        para_fill(pv_cpu_ops.set_debugreg, SetDR);
        para_fill(pv_cpu_ops.read_cr0, GetCR0);
        para_fill(pv_mmu_ops.read_cr2, GetCR2);
        para_fill(pv_mmu_ops.read_cr3, GetCR3);
        para_fill(pv_cpu_ops.read_cr4, GetCR4);
        para_fill(pv_cpu_ops.write_cr0, SetCR0);
        para_fill(pv_mmu_ops.write_cr2, SetCR2);
        para_fill(pv_mmu_ops.write_cr3, SetCR3);
        para_fill(pv_cpu_ops.write_cr4, SetCR4);

        para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
        para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
        para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
        para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);

        para_fill(pv_cpu_ops.wbinvd, WBINVD);
        para_fill(pv_cpu_ops.read_tsc, RDTSC);

        /* The following we emulate with trap and emulate for now */
        /* paravirt_ops.read_msr = vmi_rdmsr */
        /* paravirt_ops.write_msr = vmi_wrmsr */
        /* paravirt_ops.rdpmc = vmi_rdpmc */

        /* TR interface doesn't pass TR value, wrap */
        para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

        /* LDT is special, too */
        para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

        para_fill(pv_cpu_ops.load_gdt, SetGDT);
        para_fill(pv_cpu_ops.load_idt, SetIDT);
        para_fill(pv_cpu_ops.store_gdt, GetGDT);
        para_fill(pv_cpu_ops.store_idt, GetIDT);
        para_fill(pv_cpu_ops.store_tr, GetTR);
        pv_cpu_ops.load_tls = vmi_load_tls;
        para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
                  write_ldt_entry, WriteLDTEntry);
        para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
                  write_gdt_entry, WriteGDTEntry);
        para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
                  write_idt_entry, WriteIDTEntry);
        para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
        para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
        para_fill(pv_cpu_ops.io_delay, IODelay);

        para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
                  set_lazy_mode, SetLazyMode);
        para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
                  set_lazy_mode, SetLazyMode);

        para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
                  set_lazy_mode, SetLazyMode);
        para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
                  set_lazy_mode, SetLazyMode);

        /* user and kernel flush are just handled with different flags to FlushTLB */
        para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
        para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
        para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);

        /*
         * Until a standard flag format can be agreed on, we need to
         * implement these as wrappers in Linux. Get the VMI ROM
         * function pointers for the two backend calls.
         */
#ifdef CONFIG_X86_PAE
        vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
        vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
        vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
        vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

        if (vmi_ops.set_pte) {
                pv_mmu_ops.set_pte = vmi_set_pte;
                pv_mmu_ops.set_pte_at = vmi_set_pte_at;
                pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
                pv_mmu_ops.set_pud = vmi_set_pud;
                pv_mmu_ops.pte_clear = vmi_pte_clear;
                pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
        }

        if (vmi_ops.update_pte) {
                pv_mmu_ops.pte_update = vmi_update_pte;
                pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
        }

        vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
        if (vmi_ops.allocate_page) {
                pv_mmu_ops.alloc_pte = vmi_allocate_pte;
                pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
                pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
        }

        vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
        if (vmi_ops.release_page) {
                pv_mmu_ops.release_pte = vmi_release_pte;
                pv_mmu_ops.release_pmd = vmi_release_pmd;
                pv_mmu_ops.pgd_free = vmi_pgd_free;
        }

        /* Set linear is needed in all cases */
        vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);

        /*
         * These MUST always be patched. Don't support indirect jumps
         * through these operations, as the VMI interface may use either
         * a jump or a call to get to these operations, depending on
         * the backend. They are performance critical anyway, so requiring
         * a patch is not a big problem.
         */
        pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
        pv_cpu_ops.iret = (void *)0xbadbab0;
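        /*
         * The values above are deliberately bogus sentinels: if the
         * patcher ever fails to rewrite one of these sites, the wild
         * jump faults immediately instead of silently running on.
         */
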
#ifdef CONFIG_SMP
        para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        para_fill(apic->read, APICRead);
        para_fill(apic->write, APICWrite);
#endif

        /*
         * Check for VMI timer functionality by probing for a cycle frequency method
         */
        reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
        if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
                vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
                vmi_timer_ops.get_cycle_counter =
                        vmi_get_function(VMI_CALL_GetCycleCounter);
                vmi_timer_ops.get_wallclock =
                        vmi_get_function(VMI_CALL_GetWallclockTime);
                vmi_timer_ops.wallclock_updated =
                        vmi_get_function(VMI_CALL_WallclockUpdated);
                vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
                vmi_timer_ops.cancel_alarm =
                        vmi_get_function(VMI_CALL_CancelAlarm);
                x86_init.timers.timer_init = vmi_time_init;
#ifdef CONFIG_X86_LOCAL_APIC
                x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
                x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
#endif
                pv_time_ops.sched_clock = vmi_sched_clock;
                x86_platform.calibrate_tsc = vmi_tsc_khz;
                x86_platform.get_wallclock = vmi_get_wallclock;
                x86_platform.set_wallclock = vmi_set_wallclock;

                /* We have true wallclock functions; disable CMOS clock sync */
                no_sync_cmos_clock = 1;
        } else {
                disable_noidle = 1;
                disable_vmi_timer = 1;
        }

        para_fill(pv_irq_ops.safe_halt, Halt);

        /*
         * Alternative instruction rewriting doesn't happen soon enough
         * to convert VMI_IRET to a call instead of a jump; so we have
         * to do this before IRQs get reenabled. Fortunately, it is
         * idempotent.
         */
        apply_paravirt(__parainstructions, __parainstructions_end);

        vmi_bringup();

        return 1;
}
#undef para_fill

void __init vmi_init(void)
{
        if (!vmi_rom)
                probe_vmi_rom();
        else
                check_vmi_rom(vmi_rom);

        /* In case probing for or validating the ROM failed, bail */
        if (!vmi_rom)
                return;

        reserve_top_address(-vmi_rom->virtual_top);

#ifdef CONFIG_X86_IO_APIC
        /* This is virtual hardware; timer routing is wired correctly */
        no_timer_check = 1;
#endif
}

void __init vmi_activate(void)
{
        unsigned long flags;

        if (!vmi_rom)
                return;

        local_irq_save(flags);
        activate_vmi();
        local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
        if (!arg)
                return -EINVAL;

        if (!strcmp(arg, "disable_pge")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
                disable_pge = 1;
        } else if (!strcmp(arg, "disable_pse")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
                disable_pse = 1;
        } else if (!strcmp(arg, "disable_sep")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
                disable_sep = 1;
        } else if (!strcmp(arg, "disable_tsc")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
                disable_tsc = 1;
        } else if (!strcmp(arg, "disable_mtrr")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
                disable_mtrr = 1;
        } else if (!strcmp(arg, "disable_timer")) {
                disable_vmi_timer = 1;
                disable_noidle = 1;
        } else if (!strcmp(arg, "disable_noidle"))
                disable_noidle = 1;
        return 0;
}
early_param("vmi", parse_vmi);
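
/*
 * early_param() registers "vmi" as an early boot option, so e.g.
 * booting with vmi=disable_tsc masks the TSC feature from the guest's
 * CPUID results and CPU capability bits.
 */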