/* arch/powerpc/kvm/44x.c */
  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * You should have received a copy of the GNU General Public License
  12. * along with this program; if not, write to the Free Software
  13. * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  14. *
  15. * Copyright IBM Corp. 2008
  16. *
  17. * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  18. */
  19. #include <linux/kvm_host.h>
  20. #include <asm/reg.h>
  21. #include <asm/cputable.h>
  22. #include <asm/tlbflush.h>
  23. #include "44x_tlb.h"
/* Note: clearing MSR[DE] just means that the debug interrupt will not be
 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
 * will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
 */
  30. static void kvm44x_disable_debug_interrupts(void)
  31. {
  32. mtmsr(mfmsr() & ~MSR_DE);
  33. }
/* Restore the host's debug register state after running a guest.
 *
 * Debug interrupts are masked first so that no stale guest breakpoint
 * configuration can fire while the host values are rewritten. The host
 * values themselves were saved by kvmppc_core_load_guest_debugstate().
 * DBCR0 is written after DBCR1/DBCR2 -- presumably so its event-enable
 * bits only take effect once the other control registers already hold
 * host values; TODO confirm against the 440 debug facility spec.
 * Finally the saved host MSR is restored, which re-enables MSR[DE] if
 * the host had it set.
 */
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
	kvm44x_disable_debug_interrupts();

	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
	mtmsr(vcpu->arch.host_msr);
}
/* Switch the CPU's debug facilities from host state to the state
 * requested for the guest.
 *
 * Saves the host MSR and all debug SPRs (IAC1-4, DBCR0-2) into
 * vcpu->arch so kvmppc_core_load_host_debugstate() can restore them,
 * then programs the instruction-address breakpoints requested through
 * vcpu->guest_debug. MSR[DE] is cleared before any register is touched
 * so no debug interrupt can be taken with a half-updated configuration.
 */
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
	u32 dbcr0 = 0;

	vcpu->arch.host_msr = mfmsr();
	kvm44x_disable_debug_interrupts();

	/* Save host debug register state. */
	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);

	/* set registers up for guest: each requested breakpoint (a zero
	 * address means "slot unused") is loaded into an IAC register, and
	 * the matching enable bit plus internal-debug-mode is accumulated
	 * in dbcr0. */
	if (dbg->bp[0]) {
		mtspr(SPRN_IAC1, dbg->bp[0]);
		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
	}
	if (dbg->bp[1]) {
		mtspr(SPRN_IAC2, dbg->bp[1]);
		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
	}
	if (dbg->bp[2]) {
		mtspr(SPRN_IAC3, dbg->bp[2]);
		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
	}
	if (dbg->bp[3]) {
		mtspr(SPRN_IAC4, dbg->bp[3]);
		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
	}

	mtspr(SPRN_DBCR0, dbcr0);
	/* DBCR1/DBCR2 cleared -- presumably disabling any address-range or
	 * data-compare qualifiers; confirm against the 440 DBCR layout. */
	mtspr(SPRN_DBCR1, 0);
	mtspr(SPRN_DBCR2, 0);
}
  81. void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  82. {
  83. int i;
  84. /* Mark every guest entry in the shadow TLB entry modified, so that they
  85. * will all be reloaded on the next vcpu run (instead of being
  86. * demand-faulted). */
  87. for (i = 0; i <= tlb_44x_hwater; i++)
  88. kvmppc_tlbe_set_modified(vcpu, i);
  89. }
/* Called when this vcpu is descheduled from a physical CPU.
 *
 * Flushes the whole TLB via _tlbia() so no guest translations remain
 * resident while another task runs on this CPU.
 */
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbia();
}
  98. int kvmppc_core_check_processor_compat(void)
  99. {
  100. int r;
  101. if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
  102. r = 0;
  103. else
  104. r = -ENOTSUPP;
  105. return r;
  106. }
  107. int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
  108. {
  109. struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[0];
  110. tlbe->tid = 0;
  111. tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
  112. tlbe->word1 = 0;
  113. tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
  114. tlbe++;
  115. tlbe->tid = 0;
  116. tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
  117. tlbe->word1 = 0xef600000;
  118. tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
  119. | PPC44x_TLB_I | PPC44x_TLB_G;
  120. /* Since the guest can directly access the timebase, it must know the
  121. * real timebase frequency. Accordingly, it must see the state of
  122. * CCR1[TCS]. */
  123. vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
  124. return 0;
  125. }
  126. /* 'linear_address' is actually an encoding of AS|PID|EADDR . */
  127. int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
  128. struct kvm_translation *tr)
  129. {
  130. struct kvmppc_44x_tlbe *gtlbe;
  131. int index;
  132. gva_t eaddr;
  133. u8 pid;
  134. u8 as;
  135. eaddr = tr->linear_address;
  136. pid = (tr->linear_address >> 32) & 0xff;
  137. as = (tr->linear_address >> 40) & 0x1;
  138. index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
  139. if (index == -1) {
  140. tr->valid = 0;
  141. return 0;
  142. }
  143. gtlbe = &vcpu->arch.guest_tlb[index];
  144. tr->physical_address = tlb_xlate(gtlbe, eaddr);
  145. /* XXX what does "writeable" and "usermode" even mean? */
  146. tr->valid = 1;
  147. return 0;
  148. }