/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "coproc.h"

/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host has been built without VFPv3 support,
         * but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
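
/*
 * Data cache maintenance by set/way cannot be virtualised sensibly on a
 * per-line basis, so KVM traps these operations (via HCR.TSW) and handles
 * them in access_dcsw() below.  The guest typically issues something like
 *
 *      mcr p15, 0, <Rt>, c7, c14, 2    @ DCCISW: clean+invalidate by set/way
 *
 * as part of its own cache maintenance loops.  access_dcsw() performs the
 * operation on the current physical CPU and marks all other CPUs in
 * require_dcache_flush, so a full flush can be done if the VCPU is later
 * scheduled elsewhere; if the VCPU has already migrated since its last run,
 * it takes the conservative path and calls flush_cache_all().
 */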

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        u32 val;
        int cpu;

        /*
         * These operations are write-only; reject reads before taking the
         * CPU reference so we never return with preemption disabled.
         */
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt1);

        switch (p->CRm) {
        case 6:         /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:        /* DCCISW */
                asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
                break;

        case 10:        /* DCCSW */
                asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
                break;
        }

done:
        put_cpu();
        return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
                    const struct coproc_params *p,
                    const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
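
/*
 * All of the performance monitor registers below are therefore RAZ/WI: for
 * example a guest "mrc p15, 0, <Rt>, c9, c12, 0" (PMCR read) returns 0, so
 * the guest sees PMCR.N == 0 (no event counters), and writes to any of
 * these registers are silently ignored.
 */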
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
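
/*
 * Each entry below identifies one register by its MRC/MCR (or MRRC/MCRR)
 * coprocessor encoding.  For example,
 *
 *      { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr }
 *
 * matches the 32-bit access "mrc/mcr p15, 0, <Rt>, c9, c12, 0" (PMCR) and
 * hands it to access_pmcr (i.e. pm_fake above).  Entries that also carry a
 * reset function and a cp15 array index (e.g. c0_CSSELR) describe guest
 * state kept in vcpu->arch.cp15 and reset by kvm_reset_coprocs().
 */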

/* Architected CP15 registers.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
 */
static const struct coproc_reg cp15_regs[] = {
        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* TTBR0/TTBR1: swapped by interrupt.S. */
        { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
        { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

        /* TTBCR: swapped by interrupt.S. */
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c2_TTBCR, 0x00000000 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c6_IFAR },
        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c10_NMRR},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        target_tables[table->target] = table;
}
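
/*
 * A CPU-specific file is expected to build its own table of coproc_reg
 * entries and register it at init time.  A rough, purely illustrative
 * sketch (register choice and reset value are examples only; mainline has
 * a real one for the Cortex-A15 in coproc_a15.c):
 *
 *      static const struct coproc_reg a15_regs[] = {
 *              { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
 *                              NULL, reset_val, c1_SCTLR, 0x00C50078 },
 *      };
 *
 *      static struct kvm_coproc_target_table a15_target_table = {
 *              .target = KVM_ARM_TARGET_CORTEX_A15,
 *              .table  = a15_regs,
 *              .num    = ARRAY_SIZE(a15_regs),
 *      };
 *
 *      kvm_register_target_coproc_table(&a15_target_table);
 */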

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct coproc_reg *r = &table[i];

                if (params->is_64bit != r->is_64)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest CP15 access at: %08x\n",
                        *vcpu_pc(vcpu));
                print_cp_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}
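
/*
 * For a 64-bit (MCRR/MRRC) CP15 trap, kvm_handle_cp15_64() below decodes
 * the HSR ISS field as: Op1 from bit 16, Rt2 from bit 10, Rt1 from bit 5,
 * CRm from bit 1, and the direction from bit 0 (0 = write/MCRR,
 * 1 = read/MRRC).  CRn and Op2 do not exist for 64-bit accesses and are
 * set to 0.
 */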

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

        params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
        params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
        params.is_write = ((vcpu->arch.hsr & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
        params.CRn = 0;

        return emulate_cp15(vcpu, &params);
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}
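
/*
 * For a 32-bit (MCR/MRC) CP15 trap, kvm_handle_cp15_32() below decodes
 * the HSR ISS field as: Op2 from bit 17, Op1 from bit 14, CRn from bit 10,
 * Rt from bit 5, CRm from bit 1, and the direction from bit 0
 * (0 = write/MCR, 1 = read/MRC).  Rt2 has no meaning for 32-bit accesses
 * and is set to 0.
 */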

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

        params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
        params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
        params.is_write = ((vcpu->arch.hsr & 1) == 0);
        params.is_64bit = false;

        params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
        params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
        params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
        params.Rt2 = 0;

        return emulate_cp15(vcpu, &params);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
                BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num);

        for (num = 1; num < NR_CP15_REGS; num++)
                if (vcpu->arch.cp15[num] == 0x42424242)
                        panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}