core.c

/* World's simplest hypervisor, to test paravirt_ops and show
 * unbelievers that virtualization is the future.  Plus, it's fun! */
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/stddef.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/poll.h>
#include <asm/highmem.h>
#include <asm/asm-offsets.h>
#include <asm/i387.h>
#include "lg.h"

/* Found in switcher.S */
extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
extern unsigned long default_idt_entries[];

/* Every guest maps the core switcher code. */
#define SHARED_SWITCHER_PAGES \
	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
/* Pages for switcher itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
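/* The two per-cpu pages hold a "struct lguest_pages": one page of Guest
 * registers saved on entry and exit, and one page of read-only state the
 * switcher needs while the Guest runs (see lg.h for the actual
 * struct lguest_pages definition). */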
/* We map at -4M for ease of mapping into the guest (one PTE page). */
#define SWITCHER_ADDR 0xFFC00000

static struct vm_struct *switcher_vma;
static struct page **switcher_page;

static int cpu_had_pge;
static struct {
	unsigned long offset;
	unsigned short segment;
} lguest_entry;

/* This One Big lock protects all inter-guest data structures. */
DEFINE_MUTEX(lguest_lock);
static DEFINE_PER_CPU(struct lguest *, last_guest);

/* FIXME: Make dynamic. */
#define MAX_LGUEST_GUESTS 16
struct lguest lguests[MAX_LGUEST_GUESTS];
/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}

/* This cpu's struct lguest_pages. */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
	return &(((struct lguest_pages *)
		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
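/* map_switcher() copies the switcher code up to SWITCHER_ADDR so that it
 * sits at the same virtual address in the Host and in every Guest: that
 * way execution can continue across the pagetable switch. */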
static __init int map_switcher(void)
{
	int i, err;
	struct page **pagep;

	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
				GFP_KERNEL);
	if (!switcher_page) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		unsigned long addr = get_zeroed_page(GFP_KERNEL);
		if (!addr) {
			err = -ENOMEM;
			goto free_some_pages;
		}
		switcher_page[i] = virt_to_page(addr);
	}

	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
				     VM_ALLOC, SWITCHER_ADDR, VMALLOC_END);
	if (!switcher_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	pagep = switcher_page;
	err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
	if (err) {
		printk("lguest: map_vm_area failed: %i\n", err);
		goto free_vma;
	}
	memcpy(switcher_vma->addr, start_switcher_text,
	       end_switcher_text - start_switcher_text);

	/* Fix up IDT entries to point into copied text. */
	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	for_each_possible_cpu(i) {
		struct lguest_pages *pages = lguest_pages(i);
		struct lguest_ro_state *state = &pages->state;

		/* These fields are static: rest done in copy_in_guest_info */
		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
		store_idt(&state->host_idt_desc);
		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;
		state->guest_tss.esp0 = (long)(&pages->regs + 1);
		state->guest_tss.ss0 = LGUEST_DS;
		/* No I/O for you! */
		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
		setup_default_gdt_entries(state);
		setup_default_idt_entries(state, default_idt_entries);

		/* Set up LGUEST segments on all cpus */
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	/* Initialize entry point into switcher. */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_vma->addr);
	return 0;

free_vma:
	vunmap(switcher_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(switcher_page[i], 0);
	kfree(switcher_page);
out:
	return err;
}
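/* unmap_switcher() undoes map_switcher(): drop the mapping, then free
 * every page we pinned. */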
static void unmap_switcher(void)
{
	unsigned int i;

	vunmap(switcher_vma->addr);
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
		__free_pages(switcher_page[i], 0);
}
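/* emulate_insn() is called when the Guest gets a General Protection
 * Fault with error code 0, which is what an I/O instruction raises at
 * the Guest's privilege level.  For an IN we supply all ones, which is
 * what reading an unpopulated port gives on real hardware. */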
/* IN/OUT insns: enough to get us past boot-time probing. */
static int emulate_insn(struct lguest *lg)
{
	u8 insn;
	unsigned int insnlen = 0, in = 0, shift = 0;
	unsigned long physaddr = guest_pa(lg, lg->regs->eip);

	/* This only works for addresses in linear mapping... */
	if (lg->regs->eip < lg->page_offset)
		return 0;
	lgread(lg, &insn, physaddr, 1);

	/* Operand size prefix means it's actually for ax. */
	if (insn == 0x66) {
		shift = 16;
		insnlen = 1;
		lgread(lg, &insn, physaddr + insnlen, 1);
	}

	switch (insn & 0xFE) {
	case 0xE4: /* in  <next byte>,%al */
		insnlen += 2;
		in = 1;
		break;
	case 0xEC: /* in  (%dx),%al */
		insnlen += 1;
		in = 1;
		break;
	case 0xE6: /* out %al,<next byte> */
		insnlen += 2;
		break;
	case 0xEE: /* out %al,(%dx) */
		insnlen += 1;
		break;
	default:
		return 0;
	}

	if (in) {
		/* Low opcode bit tells us whether it's a byte or a
		 * word/long access: fake the read with all ones. */
		if (insn & 0x1)
			lg->regs->eax = 0xFFFFFFFF;
		else
			lg->regs->eax |= (0xFFFF << shift);
	}
	lg->regs->eip += insnlen;
	return 1;
}
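/* All Guest memory accesses funnel through the check below: the address
 * plus length must stay under the Guest's pfn_limit, and the addition
 * must not wrap around. */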
int lguest_address_ok(const struct lguest *lg,
		      unsigned long addr, unsigned long len)
{
	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
}

/* Just like get_user, but don't let guest access lguest binary. */
u32 lgread_u32(struct lguest *lg, unsigned long addr)
{
	u32 val = 0;

	/* Don't let them access lguest binary */
	if (!lguest_address_ok(lg, addr, sizeof(val))
	    || get_user(val, (u32 __user *)addr) != 0)
		kill_guest(lg, "bad read address %#lx", addr);
	return val;
}

void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
{
	if (!lguest_address_ok(lg, addr, sizeof(val))
	    || put_user(val, (u32 __user *)addr) != 0)
		kill_guest(lg, "bad write address %#lx", addr);
}

void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
{
	if (!lguest_address_ok(lg, addr, bytes)
	    || copy_from_user(b, (void __user *)addr, bytes) != 0) {
		/* copy_from_user should do this, but as we rely on it... */
		memset(b, 0, bytes);
		kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
	}
}

void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
	     unsigned bytes)
{
	if (!lguest_address_ok(lg, addr, bytes)
	    || copy_to_user((void __user *)addr, b, bytes) != 0)
		kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
}
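/* Setting the TS bit (bit 3 of cr0) makes the next floating-point
 * instruction trap with a Device Not Available fault (number 7), so FPU
 * state can be restored lazily rather than on every switch. */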
static void set_ts(void)
{
	u32 cr0;

	cr0 = read_cr0();
	if (!(cr0 & 8))
		write_cr0(cr0|8);
}
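/* We remember the last Guest that ran on each cpu: if it isn't us, or we
 * last ran on a different cpu's pages, everything must be copied in
 * again, hence CHANGED_ALL below. */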
static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
{
	if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
		__get_cpu_var(last_guest) = lg;
		lg->last_pages = pages;
		lg->changed = CHANGED_ALL;
	}

	/* These are pretty cheap, so we do them unconditionally. */
	pages->state.host_cr3 = __pa(current->mm->pgd);
	map_switcher_in_guest(lg, pages);
	pages->state.guest_tss.esp1 = lg->esp1;
	pages->state.guest_tss.ss1 = lg->ss1;

	/* Copy direct trap entries. */
	if (lg->changed & CHANGED_IDT)
		copy_traps(lg, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries but the TSS. */
	if (lg->changed & CHANGED_GDT)
		copy_gdt(lg, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (lg->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(lg, pages->state.guest_gdt);

	lg->changed = 0;
}
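/* The "pushf" below puts eflags on the stack; the lcall then pushes %cs
 * and the return %eip, so the stack looks exactly like a trap frame and
 * the switcher can come back to us with a single iret. */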
static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
{
	unsigned int clobber;

	copy_in_guest_info(lg, pages);

	/* Put eflags on stack, lcall does rest: suitable for iret return. */
	asm volatile("pushf; lcall *lguest_entry"
		     : "=a"(clobber), "=b"(clobber)
		     : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
		     : "memory", "%edx", "%ecx", "%edi", "%esi");
}
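/* The heart of the Host: run the Guest over and over until a trap we
 * can't handle here, pending DMA for the Launcher, or a signal throws us
 * back out to the caller. */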
int run_guest(struct lguest *lg, unsigned long __user *user)
{
	while (!lg->dead) {
		unsigned int cr2 = 0; /* Damn gcc */

		/* Hypercalls first: we might have been out to userspace */
		do_hypercalls(lg);
		if (lg->dma_is_pending) {
			if (put_user(lg->pending_dma, user) ||
			    put_user(lg->pending_key, user+1))
				return -EFAULT;
			return sizeof(unsigned long)*2;
		}

		if (signal_pending(current))
			return -ERESTARTSYS;

		/* If Waker set break_out, return to Launcher. */
		if (lg->break_out)
			return -EAGAIN;

		maybe_do_interrupt(lg);

		try_to_freeze();

		if (lg->dead)
			break;

		if (lg->halted) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			continue;
		}

		local_irq_disable();

		/* Even if *we* don't want FPU trap, guest might... */
		if (lg->ts)
			set_ts();

		/* Don't let Guest do SYSENTER: we can't handle it. */
		if (boot_cpu_has(X86_FEATURE_SEP))
			wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

		run_guest_once(lg, lguest_pages(raw_smp_processor_id()));

		/* Save cr2 now if we page-faulted. */
		if (lg->regs->trapnum == 14)
			cr2 = read_cr2();
		else if (lg->regs->trapnum == 7)
			math_state_restore();

		if (boot_cpu_has(X86_FEATURE_SEP))
			wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
		local_irq_enable();

		switch (lg->regs->trapnum) {
		case 13: /* We've intercepted a GPF. */
			if (lg->regs->errcode == 0) {
				if (emulate_insn(lg))
					continue;
			}
			break;
		case 14: /* We've intercepted a page fault. */
			if (demand_page(lg, cr2, lg->regs->errcode))
				continue;

			/* If lguest_data is NULL, this won't hurt. */
			if (put_user(cr2, &lg->lguest_data->cr2))
				kill_guest(lg, "Writing cr2");
			break;
		case 7: /* We've intercepted a Device Not Available fault. */
			/* If they don't want to know, just absorb it. */
			if (!lg->ts)
				continue;
			break;
		case 32 ... 255: /* Real interrupt, fall thru */
			cond_resched();
		case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
			continue;
		}

		if (deliver_trap(lg, lg->regs->trapnum))
			continue;

		kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
			   lg->regs->trapnum, lg->regs->eip,
			   lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
	}
	return -ENOENT;
}
int find_free_guest(void)
{
	unsigned int i;

	for (i = 0; i < MAX_LGUEST_GUESTS; i++)
		if (!lguests[i].tsk)
			return i;
	return -1;
}
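/* "Global" (PGE) pagetable entries survive a cr3 reload, which is the
 * last thing we want when flipping between Host and Guest pagetables, so
 * while lguest is loaded we turn the feature off everywhere. */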
static void adjust_pge(void *on)
{
	if (on)
		write_cr4(read_cr4() | X86_CR4_PGE);
	else
		write_cr4(read_cr4() & ~X86_CR4_PGE);
}
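/* Module setup: refuse to load under another hypervisor, map the
 * switcher, initialize the pagetable code, I/O and the /dev/lguest
 * device, then disable PGE on every cpu. */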
static int __init init(void)
{
	int err;

	if (paravirt_enabled()) {
		printk("lguest is afraid of %s\n", paravirt_ops.name);
		return -EPERM;
	}

	err = map_switcher();
	if (err)
		return err;

	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
	if (err) {
		unmap_switcher();
		return err;
	}
	lguest_io_init();

	err = lguest_device_init();
	if (err) {
		free_pagetables();
		unmap_switcher();
		return err;
	}
	lock_cpu_hotplug();
	if (cpu_has_pge) { /* We have a broader idea of "global". */
		cpu_had_pge = 1;
		on_each_cpu(adjust_pge, (void *)0, 0, 1);
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
	}
	unlock_cpu_hotplug();
	return 0;
}
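/* Cleanup is init in reverse, restoring PGE if we stole it. */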
static void __exit fini(void)
{
	lguest_device_remove();
	free_pagetables();
	unmap_switcher();

	lock_cpu_hotplug();
	if (cpu_had_pge) {
		set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		on_each_cpu(adjust_pge, (void *)1, 0, 1);
	}
	unlock_cpu_hotplug();
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");