/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell",    VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
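
/*
 * SPE state is switched between host and guest on demand: the guest's view
 * of MSR[SPE] lives in the shared area, while shadow_msr tracks what is
 * actually set in hardware.  The helpers below load or flush the guest SPE
 * registers and keep the two views consistent.
 */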
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}
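
/*
 * Pending interrupts are tracked as a bitmap in pending_exceptions, indexed
 * by BOOKE_IRQPRIO_* priority.  The queue helpers below set the relevant bit
 * and stash any side-band state (DEAR/ESR) needed when the interrupt is
 * actually delivered.
 */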
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
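
/*
 * With CONFIG_KVM_BOOKE_HV, the guest's SRR0/1, DEAR and ESR are backed by
 * real guest SPRs (GSRR0/1, GDEAR, GESR), so the accessors below read and
 * write the hardware registers directly; otherwise the values live in the
 * shared (magic) page or in vcpu->arch.
 */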
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
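
/*
 * A pending exception may only be delivered when the guest's MSR allows it
 * (e.g. MSR[EE] for external interrupts, MSR[CE] for critical ones) and the
 * vcpu is not inside the magic-page critical section (shared->critical == r1
 * while in supervisor mode).  On delivery, the old PC/MSR are written to the
 * class-specific save/restore registers and the new PC is taken from
 * IVPR | IVOR[priority].
 */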
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}
#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & (1UL << BOOKE_IRQPRIO_MACHINE_CHECK))
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
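
/*
 * Mirror the timer state into the interrupt queue: a decrementer interrupt
 * is pending only while both TCR[DIE] (interrupt enable) and TSR[DIS]
 * (interrupt status) are set.
 */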
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			smp_mb();
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns non-zero if a signal is pending.
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			 * are back at square 1 */
			continue;
		}

		break;
	}

	return r;
}
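
/*
 * Top-level vcpu run loop: deliver any pending exceptions, swap the host
 * (userspace) FPU state for the guest's around __kvmppc_vcpu_run(), and
 * swap it back on exit.
 */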
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}
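
/*
 * Build a minimal pt_regs from the current host context (stack pointer,
 * link register, MSR, and a PC sampled via a local branch-and-link) so
 * that the host's regular exception handlers can be invoked from KVM's
 * exit path.
 */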
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
/*
 * For interrupts that need to be handled by a host interrupt handler,
 * the corresponding host handler is called from here in a similar way
 * (though not identically) to how it is called from the low-level
 * handlers (such as those in arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	}
}
/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif
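
	/*
	 * TLB miss handling below is two-level: first consult the
	 * guest-visible TLB, and if the guest has no mapping, reflect the
	 * miss back into the guest.  If the guest does have a mapping,
	 * either populate the shadow (host) TLB for real RAM or fall back
	 * to MMIO emulation for everything else.
	 */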
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
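
/*
 * The sregs interface is feature-based: each get_sregs helper sets a
 * KVM_SREGS_E_* feature flag and fills in the matching fields, and each
 * set_sregs helper only consumes its fields when userspace passed the
 * corresponding flag back in.
 */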
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;
	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}
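
/*
 * Decrementer timer callback: if auto-reload (TCR[ARE]) is enabled, re-arm
 * the decrementer from DECAR, then mark the interrupt pending in TSR[DIS].
 */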
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
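
/*
 * Track which vcpu is loaded on the current host thread, so code that only
 * has `current` available can locate the active vcpu.
 */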
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR.  IVPR
	 * must have its low 16 bits clear (i.e. be 64KB aligned), so we
	 * need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs.  That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}