enlighten.c

/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>

#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/sched.h>
#include <xen/features.h>
#include <xen/page.h>

#include <asm/paravirt.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

static /* __initdata */ struct shared_info dummy_shared_info;

/*
 * Point at some empty memory to start with.  We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. the buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;
static void __init xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement)
		return;		/* already tested, not available */

	vcpup = &per_cpu(xen_vcpu_info, cpu);

	info.mfn = virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
	       cpu, vcpup, info.mfn, info.offset);

	/* Check to see if the hypervisor will put the vcpu_info
	   structure where we want it, which allows direct access via
	   a percpu-variable. */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;

		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
		       cpu, vcpup);
	}
}
static void __init xen_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
}
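
/*
 * A note on the trick below: XEN_EMULATE_PREFIX is a magic byte
 * sequence (historically an ud2a followed by the ASCII bytes 'xen')
 * placed before cpuid.  Xen recognizes the pattern, traps the
 * instruction and emulates it, so the hypervisor gets a chance to
 * filter the feature bits shown to the guest; the additional masking
 * below is done guest-side on top of that.
 */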
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	if (*ax == 1)
		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));
	*dx &= maskedx;
}
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}
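
/*
 * Interrupt-flag emulation: a PV guest cannot toggle EFLAGS.IF
 * directly.  Each vcpu instead has an evtchn_upcall_mask byte in its
 * vcpu_info; a nonzero mask blocks delivery of events (Xen's virtual
 * interrupts).  The helpers below translate between that mask and the
 * X86_EFLAGS_IF convention the rest of the kernel expects; note the
 * sense is inverted (mask set == interrupts disabled).
 */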
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = x86_read_percpu(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
	}
}

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
}
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
		BUG();
}

static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static void xen_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	xen_mc_flush();
}

static unsigned long xen_store_tr(void)
{
	return 0;
}
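
/*
 * Many of the operations from here on use the multicall interface
 * (xen_mc_entry/xen_mc_issue) rather than a direct hypercall: the
 * request is queued in per-cpu multicall space and only flushed to
 * the hypervisor when the batch is issued (immediately, or at the
 * end of a lazy-mode section), amortising hypercall overhead over
 * several updates.
 */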
static void xen_set_ldt(const void *addr, unsigned entries)
{
	unsigned long linear_addr = (unsigned long)addr;
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	if (linear_addr) {
		/* ldt may be vmalloced, use arbitrary_virt_to_machine */
		xmaddr_t maddr;
		maddr = arbitrary_virt_to_machine((unsigned long)addr);
		linear_addr = (unsigned long)maddr.maddr;
	}
	op->arg1.linear_addr = linear_addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long *frames;
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int f;
	struct multicall_space mcs;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */
	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	mcs = xen_mc_entry(sizeof(*frames) * pages);
	frames = mcs.args;

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		frames[f] = virt_to_mfn(va);
		make_lowmem_page_readonly((void *)va);
	}

	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);

	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
	 * it means we're in a context switch, and %gs has just been
	 * saved.  This means we can zero it out to prevent faults on
	 * exit from the hypervisor if the next process has no %gs.
	 * Either way, it has been saved, and the new value will get
	 * loaded properly.  This will go away as soon as Xen has been
	 * modified to not save/restore %gs for normal hypercalls.
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
		loadsegment(gs, 0);
}
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	unsigned long lp = (unsigned long)&dt[entrynum];
	xmaddr_t mach_lp = virt_to_machine(lp);
	u64 entry = *(u64 *)ptr;

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}
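
/*
 * Convert an IDT gate descriptor to Xen's trap_info format.  The
 * i386 gate layout decoded here: handler address split across
 * low[15:0] and high[31:16], code segment selector in low[31:16],
 * gate type in high[12:8] (0xf = 32-bit trap gate, 0xe = 32-bit
 * interrupt gate), DPL in high[14:13].  Anything else (task gates,
 * 16-bit gates) is skipped.
 */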
static int cvt_gate_to_trap(int vector, u32 low, u32 high,
			    struct trap_info *info)
{
	u8 type, dpl;

	type = (high >> 8) & 0x1f;
	dpl = (high >> 13) & 3;

	if (type != 0xf && type != 0xe)
		return 0;

	info->vector = vector;
	info->address = (high & 0xffff0000) | (low & 0x0000ffff);
	info->cs = low >> 16;
	info->flags = dpl;
	/* interrupt gates clear IF */
	if (type == 0xe)
		info->flags |= 4;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	preempt_disable();

	start = __get_cpu_var(idt_desc).address;
	end = start + __get_cpu_var(idt_desc).size + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];
		u32 *desc = (u32 *)g;

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, desc[0], desc[1], &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}
static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / 8;
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		const u32 *entry = (u32 *)(desc->address + in * 8);

		if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}
/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}
	}

	preempt_enable();
}

static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
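
/*
 * The "mask" argument below is an EFLAGS image; IOPL occupies bits
 * 13:12, hence (mask >> 12) & 3.  The iopl-0 case is forced up to 1,
 * presumably because a 32-bit PV guest kernel runs in ring 1 rather
 * than ring 0 and would otherwise lose access to I/O instructions.
 */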
static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(unsigned long reg)
{
	return 0;
}

static void xen_apic_write(unsigned long reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
#endif
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
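
/*
 * Note that in the cross-cpu flush below, both the mmuext_op and the
 * cpumask it points at live in multicall argument space: the
 * hypercall may not run until the batch is flushed, well after this
 * function returns, so pointing arg2.vcpumask at a local variable
 * would be a use-after-return.
 */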
static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va)
{
	struct {
		struct mmuext_op op;
		cpumask_t mask;
	} *args;
	cpumask_t cpumask = *cpus;
	struct multicall_space mcs;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->mask = cpumask;
	args->op.arg2.vcpumask = &args->mask;

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_write_cr2(unsigned long cr2)
{
	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static unsigned long xen_read_cr2_direct(void)
{
	return x86_read_percpu(xen_vcpu_info.arch.cr2);
}
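
/*
 * The _direct variants (the one above, and the assembler irq-flag
 * stubs installed in xen_setup_vcpu_info_placement below) go straight
 * to the vcpu_info copy placed in the percpu area, skipping the
 * pointer chase through per_cpu(xen_vcpu).  They are only safe to
 * install once placement has succeeded on every cpu.
 */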
static void xen_write_cr4(unsigned long cr4)
{
	/* Just ignore cr4 changes; Xen doesn't allow us to do
	   anything anyway. */
}

static unsigned long xen_read_cr3(void)
{
	return x86_read_percpu(xen_cr3);
}

static void set_current_cr3(void *v)
{
	x86_write_percpu(xen_current_cr3, (unsigned long)v);
}

static void xen_write_cr3(unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

	BUG_ON(preemptible());

	mcs = xen_mc_entry(sizeof(*op));  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	x86_write_percpu(xen_cr3, cr3);

	op = mcs.args;
	op->cmd = MMUEXT_NEW_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Update xen_current_cr3 once the batch has actually
	   been submitted. */
	xen_mc_callback(set_current_cr3, (void *)cr3);

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}
/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{
	BUG_ON(mem_map);	/* should only be used early */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
			pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
	}
}
/* This should never happen until we're OK to use struct page */
static void xen_release_pt(u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
	}
}

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	if (0 && PageHighMem(page))
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif
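
/*
 * The bit trick in mask_rw_pte below: if the existing pte has
 * _PAGE_RW set, ((old & _PAGE_RW) | ~_PAGE_RW) is all-ones and the
 * new value passes through unchanged; if it is clear, the expression
 * is ~_PAGE_RW and the AND strips _PAGE_RW from the new value.  A
 * page already mapped read-only (i.e. a pagetable page) can thus
 * never be remapped writable by the init-time set_pte.
 */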
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
static __init void xen_pagetable_setup_start(pgd_t *base)
{
	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;

	/* special set_pte for pagetable initialization */
	pv_mmu_ops.set_pte = xen_set_pte_init;

	init_mm.pgd = base;
	/*
	 * copy top-level of Xen-supplied pagetable into place.  For
	 * !PAE we can use this as-is, but for PAE it is a stand-in
	 * while we copy the pmd pages.
	 */
	memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1) {
		int i;
		/*
		 * For PAE, need to allocate new pmds, rather than
		 * share Xen's, since Xen doesn't like pmd's being
		 * shared between address spaces.
		 */
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
				pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

				memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
				       PAGE_SIZE);

				make_lowmem_page_readonly(pmd);

				set_pgd(&base[i], __pgd(1 + __pa(pmd)));
			} else
				pgd_clear(&base[i]);
		}
	}

	/* make sure zero_page is mapped RO so we can use it in pagetables */
	make_lowmem_page_readonly(empty_zero_page);
	make_lowmem_page_readonly(base);

	/*
	 * Switch to new pagetable.  This is done before
	 * pagetable_init has done anything so that the new pages
	 * added to the table can be prepared properly for Xen.
	 */
	xen_write_cr3(__pa(base));
}
static __init void xen_pagetable_setup_done(pgd_t *base)
{
	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pt = xen_alloc_pt;
	pv_mmu_ops.set_pte = xen_set_pte;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
		 * Create a mapping for the shared info page.
		 * Should be set_fixmap(), but shared_info is a machine
		 * address with no corresponding pseudo-phys address.
		 */
		set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
			    PFN_DOWN(xen_start_info->shared_info),
			    PAGE_KERNEL);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

	/* Actually pin the pagetable down, but we can't set PG_pinned
	   yet because the page structures don't exist yet. */
	{
		unsigned level;

#ifdef CONFIG_X86_PAE
		level = MMUEXT_PIN_L3_TABLE;
#else
		level = MMUEXT_PIN_L2_TABLE;
#endif

		pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
	}
}
/* This is called once we have the cpu_possible_map */
void __init xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		printk(KERN_INFO "Xen: using vcpu_info placement\n");

		pv_irq_ops.save_fl = xen_save_fl_direct;
		pv_irq_ops.restore_fl = xen_restore_fl_direct;
		pv_irq_ops.irq_disable = xen_irq_disable_direct;
		pv_irq_ops.irq_enable = xen_irq_enable_direct;
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
		pv_cpu_ops.iret = xen_iret_direct;
	}
}
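
/*
 * Patch hook for pv-op call sites.  When vcpu_info placement is in
 * use, the short _direct assembler stubs for the irq-flag ops can be
 * copied inline over the call site, avoiding both the indirect call
 * and the vcpu pointer chase; otherwise fall back to the generic
 * paravirt patching.  The reloc fixup adjusts any absolute reference
 * inside the copied stub for its new address.
 */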
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}
static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,

	.banner = xen_banner,
	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.post_allocator_init = xen_mark_init_mm_pinned,
};

static const struct pv_time_ops xen_time_ops __initdata = {
	.time_init = xen_time_init,

	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_cpu_khz = xen_cpu_khz,
	.sched_clock = xen_sched_clock,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = native_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
	.irq_enable_syscall_ret = NULL,  /* never called */

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_cpu,
		.leave = xen_leave_lazy,
	},
};

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
};
static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = xen_apic_write,
	.apic_write_atomic = xen_apic_write,
	.apic_read = xen_apic_read,
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif
};

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.alloc_pt = xen_alloc_pt_init,
	.release_pt = xen_release_pt,
	.alloc_pd = paravirt_nop,
	.alloc_pd_clone = paravirt_nop,
	.release_pd = paravirt_nop,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

	.set_pte = NULL,	/* see xen_pagetable_setup_* */
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd,

	.pte_val = xen_pte_val,
	.pgd_val = xen_pgd_val,

	.make_pte = xen_make_pte,
	.make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.set_pte_present = xen_set_pte_at,
	.set_pud = xen_set_pud,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,

	.make_pmd = xen_make_pmd,
	.pmd_val = xen_pmd_val,
#endif	/* PAE */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy,
	},
};
#ifdef CONFIG_SMP
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,
	.smp_call_function_mask = xen_smp_call_function_mask,
};
#endif	/* CONFIG_SMP */
static void xen_reboot(int reason)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};
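
/*
 * Xen lives at the top of the 32-bit virtual address space, starting
 * at HYPERVISOR_VIRT_START (or at virt_start as reported by the
 * hypervisor's platform parameters).  reserve_top_address() keeps the
 * kernel's fixmap below that boundary; -top computes the reserved
 * size as the wrap-around distance from 4GB, apparently with two
 * pages of slack.
 */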
static void __init xen_reserve_top(void)
{
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top + 2 * PAGE_SIZE);
}
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_irq_ops = xen_irq_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;

	machine_ops = xen_machine_ops;

#ifdef CONFIG_SMP
	smp_ops = xen_smp_ops;
#endif

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;

	pgd = (pgd_t *)xen_start_info->pt_base;

	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;

	init_mm.pgd = pgd; /* use the Xen pagetables to start */

	/* keep using Xen gdt for now; no urgent need to change it */

	x86_write_percpu(xen_cr3, __pa(pgd));
	x86_write_percpu(xen_current_cr3, __pa(pgd));

#ifdef CONFIG_SMP
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
#else
	/* May as well do it now, since there's no good time to call
	   it later on UP. */
	xen_setup_vcpu_info_placement();
#endif

	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;

	/* Start the world */
	start_kernel();
}