/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vc;
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->stolen_tb += mftb() - vc->preempt_tb;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
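
/*
 * Guest-side sketch (illustrative, not part of this file): how a PAPR
 * guest reaches do_h_register_vpa() above.  The subfunction is packed
 * into the flags argument (names as in asm/hvcall.h):
 *
 *	unsigned long flags = H_VPA_REG_VPA << H_VPA_FUNC_SHIFT;
 *
 *	plpar_hcall_norets(H_REGISTER_VPA, flags, vcpu_id, vpa_addr);
 *
 * vpa_addr must be cache-line aligned, and the buffer's length field at
 * offset 4 (see struct reg_vpa) must cover at least a struct lppaca.
 */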
static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
{
	void *va;
	unsigned long nb;

	vpap->update_pending = 0;
	va = NULL;
	if (vpap->next_gpa) {
		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
		/* pinning can fail; only check the length if it succeeded */
		if (va && nb < vpap->len) {
			/*
			 * If it's now too short, it must be that userspace
			 * has changed the mappings underlying guest memory,
			 * so unregister the region.
			 */
			kvmppc_unpin_guest_page(kvm, va);
			va = NULL;
		}
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
	vpap->pinned_addr = va;
	if (va)
		vpap->pinned_end = va + vpap->len;
}
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
		/* the update may have failed and unpinned the VPA */
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long old_stolen;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	old_stolen = vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = vc->stolen_tb;
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = mftb();
	dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
}
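
/*
 * Worked example (illustrative): with a three-entry DTL buffer, dtl_ptr
 * steps through pinned_addr, +1, +2 and then wraps back to pinned_addr,
 * while vpa->dtl_idx keeps counting 1, 2, 3, 4, ...  A guest that has
 * consumed N entries can therefore detect overwritten (lost) entries by
 * noticing dtl_idx - N growing past the number of entries in the buffer.
 */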
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_ENTER:
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
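
/*
 * For reference (illustrative, not part of this file): an hcall from the
 * guest is just "sc 1" with the function code in r3 and arguments in r4
 * onwards, e.g. for the H_PROD case handled above:
 *
 *	li	r3, <H_PROD>		# hcall function code
 *	mr	r4, <target vcpu id>
 *	sc	1			# hypervisor call; status back in r3
 *
 * which is why the handler reads GPRs 3-7 and posts its H_SUCCESS /
 * H_PARAMETER status by writing GPR 3 before resuming the guest.
 */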
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				kvmppc_get_pc(vcpu), 0);
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* clear everything first so the memset can't wipe the pvr we report */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = put_user(0, (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
	{
		u64 hior;
		/* Only allow this to be set to zero */
		r = get_user(hior, (u64 __user *)reg->addr);
		if (!r && (hior != 0))
			r = -EINVAL;
		break;
	}
	default:
		break;
	}

	return r;
}
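
/*
 * Userspace usage sketch (illustrative): HIOR is the only ONE_REG
 * register handled here, and on Book3S HV it may only be set to zero:
 *
 *	u64 hior = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)(uintptr_t)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * The set ioctl succeeds only when the value read back from reg.addr
 * is zero.
 */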
int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = mftb();
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	vcpu->arch.stolen_logged = vcore->stolen_tb;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.dtl.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
	if (vcpu->arch.slb_shadow.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
	if (vcpu->arch.vpa.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
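
/*
 * Worked example for the conversion above (illustrative): the POWER7
 * timebase runs at 512 MHz, so with tb_ticks_per_sec = 512000000 a
 * decrementer expiring 1024 timebase ticks from now gives
 *
 *	dec_nsec = 1024 * NSEC_PER_SEC / 512000000 = 2000 ns,
 *
 * i.e. about 1.95 ns per timebase tick.
 */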
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		kvmppc_grab_hwthread(cpu);
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}
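
/*
 * Example (illustrative): on a 4-thread POWER7 core, running a guest
 * from cpu 0 requires taking threads 1-3 offline first, e.g. from
 * userspace:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 *	echo 0 > /sys/devices/system/cpu/cpu3/online
 *
 * otherwise on_primary_thread() fails and kvmppc_run_core() below sets
 * -EBUSY for every vcpu in the vcore.
 */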
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->stolen_tb += mftb() - vc->preempt_tb;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			kvmppc_update_vpas(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}
	/* Grab any remaining hw threads so they can't go into the kernel */
	for (i = ptid; i < threads_per_core; ++i)
		kvmppc_grab_hwthread(vc->pcpu + i);

	preempt_disable();
	spin_unlock(&vc->lock);
	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu0);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	vc->preempt_tb = mftb();
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		vc->runner = NULL;
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);
		if (r)
			return r;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		}
	} while (r == RESUME_GUEST);
	return r;
}
/*
 * Work out RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
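
/*
 * Worked example (illustrative): a 256 MB RMA maps to RMLS = 4, which
 * kvmppc_hv_setup_rma() below shifts into LPCR[RMLS] on POWER7 (or the
 * split HID4 RMLS field on PPC970); any size not in the table above is
 * rejected with -EINVAL.
 */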
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0) {
		/* don't touch ri after releasing it */
		kvm_release_rma(ri);
		return fd;
	}

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}
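
/*
 * Userspace usage sketch (illustrative): the returned fd is mmap()ed
 * and then handed back to KVM as the memory backing guest real address
 * 0, which is how kvmppc_hv_setup_rma() later recognises it by f_op:
 *
 *	struct kvm_allocate_rma rma;
 *	int fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);
 *	void *mem = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * followed by a KVM_SET_USER_MEMORY_REGION registering mem at guest
 * physical address 0.
 */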
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc;
	(*sps)++;
}

int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
	return 0;
}
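
/*
 * Userspace usage sketch (illustrative): QEMU-style callers query the
 * segment/page sizes advertised above with a plain vm ioctl:
 *
 *	struct kvm_ppc_smmu_info info;
 *	ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info);
 *
 * and then walk info.sps[] (4K/64K/16M here) to build the guest's
 * ibm,segment-page-sizes device tree property.
 */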
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
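
/*
 * Userspace usage sketch (illustrative):
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * where bitmap holds one bit per page of the slot.  Because the handler
 * above clears the bitmap and then harvests dirty bits from the hashed
 * page table via kvmppc_hv_get_dirty_log(), each call returns only the
 * pages dirtied since the previous call.
 */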
static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
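
/*
 * Worked example (illustrative): psize = 0x10000 (64K) encodes as
 * SLB_VSID_L | SLB_VSID_LP_01, 16M (0x1000000) as SLB_VSID_L alone,
 * and 4K as 0, matching the L||LP base-page-size encoding the hardware
 * expects in the SLB VSID word.
 */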
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long npages;
	unsigned long *phys;

	/* Allocate a slot_phys array */
	phys = kvm->arch.slot_phys[mem->slot];
	if (!kvm->arch.using_mmu_notifiers && !phys) {
		npages = mem->memory_size >> PAGE_SHIFT;
		phys = vzalloc(npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		kvm->arch.slot_phys[mem->slot] = phys;
		kvm->arch.slot_npages[mem->slot] = npages;
	}

	return 0;
}

static void unpin_slot(struct kvm *kvm, int slot_id)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = kvm->arch.slot_phys[slot_id];
	npages = kvm->arch.slot_npages[slot_id];
	if (physp) {
		spin_lock(&kvm->arch.slot_phys_lock);
		for (j = 0; j < npages; j++) {
			if (!(physp[j] & KVMPPC_GOT_PAGE))
				continue;
			pfn = physp[j] >> PAGE_SHIFT;
			page = pfn_to_page(pfn);
			SetPageDirty(page);
			put_page(page);
		}
		kvm->arch.slot_phys[slot_id] = NULL;
		spin_unlock(&kvm->arch.slot_phys_lock);
		vfree(physp);
	}
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;		/* signed: lpcr_rmls() returns -1 on failure */
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = kvm->arch.slot_phys[memslot->id];
		spin_lock(&kvm->arch.slot_phys_lock);
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		spin_unlock(&kvm->arch.slot_phys_lock);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
out:
	mutex_unlock(&kvm->lock);
	return err;

up_out:
	up_read(&current->mm->mmap_sem);
	goto out;
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	unsigned long i;

	if (!kvm->arch.using_mmu_notifiers)
		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
			unpin_slot(kvm, i);

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);