emulate.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"
#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

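/*
 * Lookup table from (banked register set, register number) to the offset, in
 * u32 units, of the backing field inside struct kvm_regs.  Registers that are
 * not banked for a given mode simply reuse the USR offsets.
 */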
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	u32 *reg_array = (u32 *)&vcpu->arch.regs;
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

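	/*
	 * USR, FIQ, IRQ and SVC (0x10-0x13) map onto the first four rows of
	 * vcpu_reg_offsets once the MODE32 bit is cleared; the remaining
	 * modes are translated explicitly below.
	 */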
	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
 * halt execution of world-switches and schedule other host processes until
 * there is an incoming IRQ or FIQ to the VM.
 */
int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	return 1;
}

/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

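	/*
	 * Reassemble IT[7:0] from its split CPSR encoding: the base condition
	 * IT[7:5] sits in CPSR[15:13], the mask bits IT[4:2] in CPSR[12:10]
	 * and IT[1:0] in CPSR[26:25].
	 */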
	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}

/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}

/******************************************************************************
 * Inject exceptions into the guest
 */

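/*
 * The guest's exception vector base is the Hivecs region at 0xffff0000 when
 * SCTLR.V is set, and the address programmed into VBAR otherwise.
 */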
static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}

/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

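	/* Prefetch aborts vector at base + 0x0c, data aborts at base + 0x10 */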
	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !is_pabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}