emulate.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5

#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

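/*
 * Each entry in the table below is a word index into struct kvm_regs.
 * As a worked example (assuming usr_regs is the first field of
 * struct kvm_regs, as in the ARM KVM ABI), USR_REG_OFFSET(2) expands to
 * offsetof(struct kvm_regs, usr_regs.uregs[2]) / sizeof(u32) == 2, so
 * viewing the structure as an array of u32 words, index 2 is the
 * guest's r2.
 */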
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

/*
 * Return a pointer to the register of the given number, as banked in the
 * current mode of the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE ... SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

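/*
 * For example, reading the banked r13 (SP) of a guest that trapped in
 * SVC mode reduces to:
 *
 *	unsigned long sp = *vcpu_reg(vcpu, 13);
 *
 * SVC_MODE (0x13) with MODE32_BIT cleared gives 3 == VCPU_REG_OFFSET_SVC,
 * so this resolves through vcpu_reg_offsets[VCPU_REG_OFFSET_SVC][13],
 * i.e. the svc_regs[0] slot of struct kvm_regs.
 */
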
/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

/*
 * A conditional instruction is allowed to trap, even though it
 * wouldn't be executed.  So let's re-implement the hardware, in
 * software!
 */
bool kvm_condition_valid(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr, cond, insn;

	/*
	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
	 * catch undefined instructions, and then we won't get past
	 * the arm_exit_handlers test anyway.
	 */
	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));

	/* Top two bits non-zero?  Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	/* Is condition field valid? */
	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
	else {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	/* Shift makes it look like an ARM-mode instruction */
	insn = cond << 28;
	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
}

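/*
 * For example, a guest "mcrne p15, ..." (cond == NE == 0b0001) that traps
 * with HSR.CV set yields cond == 0b0001 above, so insn becomes 0x10000000
 * and arm_check_condition() evaluates NE against CPSR.Z exactly as the
 * hardware would have.
 */
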
/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu: The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually.  The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}

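/*
 * Worked example: a two-instruction "ITT EQ" block starts with ITSTATE ==
 * 0b00000100 (cond == 0b000, itbits == 0b00100).  If the first instruction
 * traps and is emulated, the advance above yields itbits == 0b01000, i.e.
 * ITSTATE == 0b00001000, exactly the state of a one-instruction "IT EQ"
 * block.  Advancing once more finds (itbits & 0x7) == 0 and clears the
 * whole field, leaving the IT block.
 */
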
/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The VCPU pointer
 * @is_wide_instr: true if the trapped instruction used a 32-bit encoding
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}

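/*
 * For example, a trapped 16-bit Thumb "ldr r0, [r1]" advances the PC by 2,
 * while its 32-bit Thumb-2 encoding, or any ARM-mode access, advances it
 * by 4.  ITSTATE must be advanced by hand in both cases, since the
 * hardware never retired the trapped instruction.
 */
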
/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}

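/*
 * The offsets applied to this base follow the architected ARM exception
 * vector layout: 0x00 reset, 0x04 undefined instruction, 0x08 supervisor
 * call, 0x0c prefetch abort, 0x10 data abort, 0x18 IRQ and 0x1c FIQ.
 * The vect_offset values used below (4 for undef, 12 for pabt, 16 for
 * dabt) index into this table.
 */
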
/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	unsigned long new_lr_value;
	unsigned long new_spsr_value;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	unsigned long new_lr_value;
	unsigned long new_spsr_value;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}
}

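/*
 * The fault status values above are the architected "debug event"
 * encodings: FS == 0b00010 (2) in the short-descriptor FSR format, and
 * STATUS == 0b100010 (0x22) with bit 9 set (the long-descriptor format
 * flag) when the guest runs with LPAE page tables, as indicated by
 * TTBCR.EAE in bit 31.
 */
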
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}

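/*
 * For context, a caller in the fault-handling path might combine the
 * helpers above roughly as follows (an illustrative sketch only;
 * handle_mmio() is a hypothetical stand-in for the real MMIO handlers,
 * which live elsewhere in KVM/ARM):
 *
 *	if (!handle_mmio(vcpu, fault_ipa))
 *		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *	else
 *		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */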