sys_regs.c

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb();
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb();
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:		/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();
	return true;
}
/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct sys_reg_params *p,
		    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },
	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  NULL, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  NULL, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, FAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  NULL, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
/* Trapped cp15 registers */
static const struct sys_reg_desc cp15_regs[] = {
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
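	/*
	 * AArch32 views of the PMU registers handled by pm_fake above
	 * (PMCR, PMCNTENSET/CLR, PMOVSR, PMSELR, PMCEID0/1, PMCCNTR,
	 * PMXEVTYPER, PMXEVCNTR, PMUSERENR, PMINTENSET/CLR): all RAZ/WI.
	 */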
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
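/*
 * A hypothetical sketch of how a target hooks in (names below are
 * illustrative, not from this file): the target's init code fills in
 * a kvm_sys_reg_target_table and registers it once, e.g.
 *
 *	static struct kvm_sys_reg_target_table foo_target_table = {
 *		.table64 = {
 *			.table	= foo_sys_regs,
 *			.num	= ARRAY_SIZE(foo_sys_regs),
 *		},
 *		.table32 = {
 *			.table	= foo_cp15_regs,
 *			.num	= ARRAY_SIZE(foo_cp15_regs),
 *		},
 *	};
 *	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOO,
 *					  &foo_target_table);
 */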
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}
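/*
 * Trapped CP14 (debug) accesses are not emulated at all: any guest
 * access simply has an UNDEF exception injected back at it.
 */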
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void emulate_cp15(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, false, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return;
		}
		/* If access function fails, it should complain. */
	}

	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
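	/*
	 * Decode the MCRR/MRRC ISS straight out of the HSR: bit 0 is
	 * the direction (0 = write to the system register), CRm sits
	 * in bits [4:1], Rt in [8:5], Rt2 in [13:10] and Op1 in [19:16].
	 */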
	int Rt2 = (hsr >> 10) & 0xf;

	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	emulate_cp15(vcpu, &params);

	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}
/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
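	/*
	 * MCR/MRC ISS encoding: direction in bit 0 (0 = write), CRm in
	 * bits [4:1], Rt in [8:5], CRn in [13:10], Opc1 in [16:14] and
	 * Opc2 in [19:17].
	 */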
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	emulate_cp15(vcpu, &params);
	return 1;
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
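	/*
	 * MSR/MRS ISS encoding: direction in bit 0 (0 = write), CRm in
	 * bits [4:1], Rt in [9:5], CRn in [13:10], Op1 in [16:14],
	 * Op2 in [19:17] and Op0 in [21:20].
	 */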
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
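/*
 * Decode a userspace KVM_{GET,SET}_ONE_REG index into Op0/Op1/CRn/CRm/Op2.
 * Only 64-bit system register IDs are accepted, and any stray bits in the
 * index make it invalid.
 */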
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
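/* Count the CSSELR values that name a cache which actually exists. */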
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
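/* Build the KVM_{GET,SET}_ONE_REG index for a sys_reg descriptor. */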
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
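/* A NULL uind means the caller only wants the registers counted. */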
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}