booke.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

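/*
 * Per-VM and per-vcpu exit counters, exported through debugfs
 * (one file per entry under the kvm debugfs directory).
 */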
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",             VCPU_STAT(mmio_exits) },
	{ "dcr",              VCPU_STAT(dcr_exits) },
	{ "sig",              VCPU_STAT(signal_exits) },
	{ "itlb_r",           VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",           VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",           VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",           VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",             VCPU_STAT(syscall_exits) },
	{ "isi",              VCPU_STAT(isi_exits) },
	{ "dsi",              VCPU_STAT(dsi_exits) },
	{ "inst_emu",         VCPU_STAT(emulated_inst_exits) },
	{ "dec",              VCPU_STAT(dec_exits) },
	{ "ext_intr",         VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup",      VCPU_STAT(halt_wakeup) },
	{ "doorbell",         VCPU_STAT(dbell_exits) },
	{ "guest doorbell",   VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					      vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

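/*
 * Keep the shadow (hardware) MSR[SPE] in step with the guest's view.
 * SPE state is switched lazily: the unit is only loaded or saved when
 * the guest actually toggles MSR[SPE].
 */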
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}

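/*
 * Interrupt queueing: pending_exceptions is a bitmap indexed by
 * BOOKE_IRQPRIO_* priority.  Actual delivery is deferred until
 * kvmppc_core_check_exceptions() runs before guest entry.
 */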
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

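/*
 * Accessors for the guest's save/restore and fault registers.  With
 * CONFIG_KVM_BOOKE_HV the SRR/DEAR/ESR values live in real guest SPRs
 * (GSRR0/1, GDEAR, GESR); otherwise they are kept in the shared page.
 * The critical, debug and machine-check pairs are always shadowed in
 * vcpu->arch.
 */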
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

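	/*
	 * Delivery mimics hardware: stash the old PC/MSR in the class's
	 * save/restore register pair, vector to IVPR|IVOR[priority], and
	 * clear every MSR bit outside msr_mask.
	 */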
	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}

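/*
 * Walk the pending_exceptions bitmap in ascending BOOKE_IRQPRIO order
 * and deliver the first interrupt the guest's MSR allows; anything
 * masked stays queued for a later pass.
 */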
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			smp_mb();
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * Returns !0 if a signal is pending.
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		break;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}

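/*
 * Run the instruction emulator on the trapped instruction and map the
 * result onto a resume action: re-enter the guest, bounce a DCR access
 * out to userspace, or fail hard back to userspace.
 */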
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

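/*
 * Fake a pt_regs frame describing the current host context (stack
 * pointer, return address, MSR) so that host interrupt handlers, which
 * expect a register frame, can be called directly below.
 */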
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by a host interrupt handler,
 * the corresponding handler is called from here in a similar way (but
 * not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		/* regs must be filled before use; the original listing
		 * called unknown_exception() on an uninitialized frame. */
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

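	/*
	 * TLB misses are resolved in two stages: consult the guest-visible
	 * TLB first; on a hit, either fill the shadow (host) TLB for real
	 * RAM, or hand the access to the MMIO emulator.
	 */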
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;
	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

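/*
 * TSR updates may arrive from the decrementer tick in timer context.
 * The write barrier below plausibly pairs with the smp_mb() in
 * kvmppc_core_check_exceptions(), so the vcpu observes the new TSR
 * bits before it acts on the pending-timer request.
 */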
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}

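/*
 * Decrementer timer callback.  With auto-reload enabled (TCR[ARE]),
 * the guest DEC is rearmed from DECAR before the decrementer status
 * bit is posted via kvmppc_set_tsr_bits().
 */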
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}