/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64-bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
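/*
 * CSSELR encoding (also relied on by is_valid_cache() below): bit 0
 * selects instruction (1) vs. data/unified (0), and bits [3:1] hold the
 * cache level minus one. So, for example, csselr == 1 reads the CCSIDR
 * of the L1 instruction cache, and csselr == 2 that of the L2
 * data/unified cache.
 */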
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb();
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb();
}

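/*
 * Set/way cache maintenance is architecturally local to the CPU that
 * executes it. If this vCPU has migrated since it last ran, operating
 * by set/way here cannot clean what the guest left in the previous
 * core's caches, hence the flush_cache_all() fallback below.
 */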
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:		/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:	/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:	/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();
	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct sys_reg_params *p,
		    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
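	/* Bit 31 of MPIDR_EL1 is RES1, hence the (1UL << 31) below. */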
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
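/*
 * Illustration (not an additional table entry): a guest executing
 * "msr sctlr_el1, x0" traps with Op0=0b11, Op1=0b000, CRn=0b0001,
 * CRm=0b0000, Op2=0b000, which find_reg() matches against the
 * SCTLR_EL1 descriptor below.
 */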
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },
	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  NULL, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  NULL, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  NULL, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0 },

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

/* Trapped cp15 registers */
static const struct sys_reg_desc cp15_regs[] = {
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void emulate_cp15(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, false, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return;
		}
		/* If access function fails, it should complain. */
	}

	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32 bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
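	/*
	 * For example (illustrative only): for a guest
	 * "mcrr p15, 0, r2, r3, c14" (Rt = r2, Rt2 = r3), we stash
	 * (r3 << 32) | r2 in the Rt slot, and the 64-bit backend sees a
	 * single value.
	 */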
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);

		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	emulate_cp15(vcpu, &params);

	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);

		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	emulate_cp15(vcpu, &params);
	return 1;
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

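/*
 * Userspace register indices mirror sys_reg_to_index() below: an index
 * is KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG with Op0,
 * Op1, CRn, CRm and Op2 packed at their KVM_REG_ARM64_SYSREG_*_SHIFT
 * positions. index_to_params() is simply the inverse of that packing.
 */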
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

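/*
 * For instance, FUNCTION_INVARIANT(midr_el1) expands (roughly) to:
 *
 *	static void get_midr_el1(struct kvm_vcpu *v,
 *				 const struct sys_reg_desc *r)
 *	{
 *		u64 val;
 *		asm volatile("mrs %0, midr_el1\n" : "=r" (val));
 *		((struct sys_reg_desc *)r)->val = val;
 *	}
 *
 * i.e. each invocation below generates a getter that snapshots the
 * host's value of that register into the descriptor.
 */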
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

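/*
 * A couple of worked examples of the CSSELR encoding checked below:
 * val == 0 (L1 data/unified cache) is valid whenever Ctype1 reports a
 * data, unified or separate cache; val == 1 (L1 instruction cache) is
 * only valid when Ctype1 reports an instruction or separate cache.
 */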
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

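/*
 * Demux register indices pack a register class into the DEMUX_ID field
 * and a class-specific value into DEMUX_VAL; for CCSIDR that value is
 * the CSSELR index validated above. These are 32-bit registers, hence
 * the KVM_REG_SIZE(id) != 4 checks below.
 */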
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	/* These are arm64 indices, so KVM_REG_ARM64 rather than KVM_REG_ARM. */
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
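/*
 * The walk below is a classic merge of two sorted sequences: on each
 * step the smaller key is emitted and advanced; on a tie the
 * target-specific entry wins and both iterators advance, which is how
 * a target table overrides a generic entry without duplicating it.
 */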
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
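	/*
	 * Worked example (hypothetical CLIDR): Ctype1 = 0b011 (separate
	 * I and D), Ctype2 = 0b100 (unified), Ctype3 = 0b000. The loop
	 * below stops at i == 2, and the mask (1 << 6) - 1 keeps only
	 * the two valid levels in cache_levels.
	 */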
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}