mmio.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"
/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	__u32 *dest;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
		memset(dest, 0, sizeof(int));

		len = run->mmio.len;
		if (len > 4)
			return -EINVAL;

		memcpy(dest, run->mmio.data, len);

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       *((u64 *)run->mmio.data));
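
		/*
		 * Sign-extend narrow loads into the full 32-bit destination
		 * register: XOR with the sign-bit mask, then subtract the mask.
		 */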
		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
			mask = 1U << ((len * 8) - 1);
			*dest = (*dest ^ mask) - mask;
		}
	}

	return 0;
}
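
/*
 * decode_hsr -- decode the Hyp Syndrome Register for an MMIO data abort
 *
 * Rejects cache maintenance operations and page table walks that fault on
 * I/O memory by injecting an abort into the guest, then extracts the access
 * size, direction, sign-extension flag and target register, and skips past
 * the trapped instruction.
 *
 * Returns 0 when @mmio has been filled in, 1 when the fault was handled by
 * injecting an exception into the guest, or a negative error code.
 */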
  52. static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
  53. struct kvm_exit_mmio *mmio)
  54. {
  55. unsigned long rt, len;
  56. bool is_write, sign_extend;
  57. if ((vcpu->arch.hsr >> 8) & 1) {
  58. /* cache operation on I/O addr, tell guest unsupported */
  59. kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
  60. return 1;
  61. }
  62. if ((vcpu->arch.hsr >> 7) & 1) {
  63. /* page table accesses IO mem: tell guest to fix its TTBR */
  64. kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
  65. return 1;
  66. }
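
	/* HSR.SAS (bits 23:22) encodes the access size of the faulting load/store. */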
	switch ((vcpu->arch.hsr >> 22) & 0x3) {
	case 0:
		len = 1;
		break;
	case 1:
		len = 2;
		break;
	case 2:
		len = 4;
		break;
	default:
		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
		return -EFAULT;
	}

	is_write = vcpu->arch.hsr & HSR_WNR;
	sign_extend = vcpu->arch.hsr & HSR_SSE;
	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
	return 0;
}
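
/*
 * io_mem_abort -- handle a guest data abort on an I/O memory region
 *
 * Decodes the faulting access and either completes it in the kernel
 * (currently only for the virtual GIC) or prepares run->mmio so user
 * space can emulate it.
 *
 * Returns 1 if the access was handled in-kernel or an exception was
 * injected into the guest, 0 if user space emulation is required, or a
 * negative error code.
 */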
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
	unsigned long rt;
	int ret;

	/*
	 * Prepare MMIO operation. First stash it in a private
	 * structure that we can use for in-kernel emulation. If the
	 * kernel can't handle it, copy it into run->mmio and let user
	 * space do its magic.
	 */
	if (vcpu->arch.hsr & HSR_ISV) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;
	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);

	if (mmio.is_write)
		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
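
	/* Give the in-kernel vgic emulation first shot at the access. */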
	if (vgic_handle_mmio(vcpu, run, &mmio))
		return 1;

	kvm_prepare_mmio(run, &mmio);
	return 0;
}