book3s_hv.c

/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
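
/*
 * Wake up a vcpu that may be sleeping on its wait queue, and poke the
 * physical CPU it is running on: with a XICS IPI if that thread is
 * under our control, otherwise with an ordinary reschedule IPI.
 */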
void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();

	/* CPU points to the first thread of the core */
	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
		int real_cpu = cpu + vcpu->arch.ptid;
		if (paca[real_cpu].kvm_hstate.xics_phys)
			xics_wake_cpu(real_cpu);
		else if (cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping. Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen. We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel. We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner). The stolen times are measured in units of
 * timebase ticks. (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = 1;
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}
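
/*
 * Handle the H_REGISTER_VPA hcall: depending on the subfunction encoded
 * in the flags argument, register or deregister the VPA, the SLB shadow
 * buffer or the dispatch trace log (DTL) for the target vcpu.  The
 * actual (un)pinning of the guest pages is deferred to
 * kvmppc_update_vpa(), called from the vcpu run path.
 */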
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now + vc->tb_offset;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
	vcpu->arch.dtl.dirty = true;
}
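
/*
 * Handle an hcall made by the guest: returns RESUME_GUEST with the
 * result placed in GPR3 if the hcall was handled here, or RESUME_HOST
 * to pass it on to userspace (see the H_RTAS and default cases below).
 */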
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		kvm_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		rc = kvmppc_rtas_hcall(vcpu);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;

	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		} /* fallthrough */
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
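
/*
 * Handle the trap that caused us to exit the guest: either resume the
 * guest, queue an interrupt for it, flag a page fault for handling in
 * the host, or return to userspace (e.g. for an hcall).
 */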
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);
	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
			vcore->lpcr = kvm->arch.lpcr;
		}
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
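
/*
 * Arm an hrtimer to fire when the guest decrementer would expire, so a
 * ceded vcpu can be woken up; if the decrementer has already expired,
 * queue the decrementer exception immediately instead.
 */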
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}
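
/*
 * Claim a sibling hardware thread for KVM's use: tell it not to enter
 * the kernel if it wakes up, and wait for it to get back to nap if it
 * is currently executing in the kernel (e.g. handling a stray
 * interrupt).
 */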
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}
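
/*
 * Spin until all the secondary hardware threads that were woken for
 * this vcore have finished running the guest and gone back to nap,
 * i.e. until nap_count catches up with n_woken.
 */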
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}
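
/*
 * Add this vcpu to the runnable list of its virtual core and loop,
 * either running the whole core ourselves or waiting for another vcpu
 * task to run it, until this vcpu needs attention in the host (or a
 * signal is pending).
 */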
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (r == RESUME_GUEST);

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct kvm_rma_info *ri = vma->vm_file->private_data;

	if (vmf->pgoff >= kvm_rma_pages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvm_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static const struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	long fd;
	struct kvm_rma_info *ri;
	/*
	 * Only do this on PPC970 in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return -EINVAL;

	if (!kvm_rma_pages)
		return -EINVAL;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
	return fd;
}
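/*
 * Userspace reaches kvm_vm_ioctl_allocate_rma() via the KVM_ALLOCATE_RMA
 * vm ioctl, gets back a file descriptor backed by kvm_rma_fops, and maps
 * it to obtain the RMA.  A minimal sketch (error handling omitted,
 * variable names illustrative):
 *
 *	struct kvm_allocate_rma rma;
 *	int rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);
 *	void *rma_va = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, rma_fd, 0);
 *
 * The mapping is then registered as guest memory at guest physical
 * address 0 with KVM_SET_USER_MEMORY_REGION, which is how
 * kvmppc_hv_setup_htab_rma() later recognises it as a preallocated RMA.
 */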
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	/*
	 * Only return the base page encoding. We don't want to return
	 * all the supported pte_enc values, because our H_ENTER doesn't
	 * support MPSS yet. Once it does, we can start passing all
	 * supported pte_enc values here.
	 */
	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
	(*sps)++;
}

int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
	return 0;
}
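/*
 * The segment page-size list built above is what userspace sees through
 * the KVM_PPC_GET_SMMU_INFO vm ioctl.  A rough consumer sketch
 * (illustrative only; entries not filled in above are assumed zeroed by
 * the generic ioctl path):
 *
 *	struct kvm_ppc_smmu_info info;
 *	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) == 0)
 *		for (int i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++)
 *			if (info.sps[i].page_shift)
 *				printf("base page shift %u\n",
 *				       info.sps[i].page_shift);
 */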
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
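/*
 * This is the handler behind the generic KVM_GET_DIRTY_LOG vm ioctl; the
 * rmap dirty bits harvested by kvmppc_hv_get_dirty_log() land in the
 * user-supplied bitmap.  Sketch of a caller (names illustrative; the
 * bitmap length matches kvm_dirty_bitmap_bytes(), i.e. npages rounded up
 * to a multiple of the word size, divided by 8):
 *
 *	unsigned long *bitmap = calloc(1, bitmap_bytes);
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */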
static void unpin_slot(struct kvm_memory_slot *memslot)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = memslot->arch.slot_phys;
	npages = memslot->npages;
	if (!physp)
		return;
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
			continue;
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
		SetPageDirty(page);
		put_page(page);
	}
}

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
		unpin_slot(free);
		vfree(free->arch.slot_phys);
		free->arch.slot_phys = NULL;
	}
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;
	slot->arch.slot_phys = NULL;

	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long *phys;

	/* Allocate a slot_phys array if needed */
	phys = memslot->arch.slot_phys;
	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
		phys = vzalloc(memslot->npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		memslot->arch.slot_phys = phys;
	}

	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old->npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}
/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->lock.
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
	long int i;
	u32 cores_done = 0;

	if ((kvm->arch.lpcr & mask) == lpcr)
		return;

	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
		if (!vc)
			continue;
		spin_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		spin_unlock(&vc->lock);
		if (++cores_done >= kvm->arch.online_vcores)
			break;
	}
}
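/*
 * Typical use, as in kvmppc_hv_setup_htab_rma() below: compute the new
 * bits for one LPCR field plus a mask covering that field, then let
 * kvmppc_update_lpcr() splice them into the VM-wide value and every
 * vcore's copy.  For the VRMASD field, for example:
 *
 *	lpcr_mask = LPCR_VRMASD;
 *	lpcr = senc << (LPCR_VRMASD_SH - 4);
 *	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
 */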
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long lpcr_mask = 0;
	unsigned long psize, porder;
	unsigned long rma_size;
	unsigned long rmls;
	unsigned long *physp;
	unsigned long i, npages;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out_srcu;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out_srcu;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr_mask = LPCR_VRMASD;
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = kvm_rma_pages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if ((long)rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out_srcu;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr_mask = (1ul << HID4_RMLS0_SH) |
				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
			lpcr = rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
		}
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = kvm_rma_pages;
		porder = __ilog2(npages);
		physp = memslot->arch.slot_phys;
		if (physp) {
			if (npages > memslot->npages)
				npages = memslot->npages;
			spin_lock(&kvm->arch.slot_phys_lock);
			for (i = 0; i < npages; ++i)
				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
					porder;
			spin_unlock(&kvm->arch.slot_phys_lock);
		}
	}

	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}
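/*
 * Summary of the two first-run paths above: if no preallocated RMA backs
 * guest physical address 0, POWER7 falls back to the VRMA (set VRMASD in
 * the LPCR and populate HPT entries with kvmppc_map_vrma()), while PPC970
 * fails with -EPERM since it requires an RMO.  If a preallocated RMA is
 * found, its size is encoded via lpcr_rmls(): into the split HID4 RMLS
 * and RMOR fields on PPC970, or into LPCR_RMLS plus kvm->arch.rmor on
 * POWER7.
 */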
int kvmppc_core_init_vm(struct kvm *kvm)
{
	unsigned long lpcr, lpid;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 */
	cpumask_setall(&kvm->arch.need_tlb_flush);

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

	/*
	 * Don't allow secondary CPU threads to come online
	 * while any KVM VMs exist.
	 */
	inhibit_secondary_onlining();

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	uninhibit_secondary_onlining();

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_rtas_tokens_free(kvm);

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);