/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
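
/*
 * SET PREFIX (SPX, 0xb210 per the b2 handler table below): load a new
 * prefix register from the word at operand 2. The value is masked to an
 * 8K boundary, and both 4K pages of the new prefix area are probed for
 * accessibility before the prefix is switched.
 */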
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
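
/*
 * STORE PREFIX (STPX, 0xb211): store the current prefix register, masked
 * to its architected 8K alignment, at the word-aligned operand address.
 */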
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}
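
/*
 * STORE CPU ADDRESS (STAP, 0xb212): store the 16-bit CPU address (here
 * the vcpu id) at the halfword-aligned operand address.
 */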
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}
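
/*
 * Storage-key instructions (ISKE/RRBE/SSKE, 0xb229/0xb22a/0xb22b) are not
 * emulated here: the PSW is rewound by the 4-byte instruction length so
 * the guest simply re-executes the operation.
 */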
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
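
/*
 * TEST PENDING INTERRUPTION (TPI, 0xb236): dequeue a pending I/O
 * interrupt matching the isc mask in CR6. With a non-zero operand the
 * two-word interruption code is stored there; with a zero operand the
 * three-word code goes to the lowcore area. cc 1 signals that a code was
 * stored, cc 0 that no interruption was pending.
 */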
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
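
/*
 * Dispatcher for the channel I/O instructions (see the b2 handler table
 * below). With in-kernel css support only TPI and the interrupt part of
 * TSCH are handled here; everything else is passed to userspace. Without
 * css support the guest always sees cc 3 (not operational).
 */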
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}
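
/*
 * A PSW is rejected as invalid if it sets unassigned mask bits or if the
 * instruction address does not fit the selected addressing mode: 24-bit
 * mode (EA=0, BA=0) and 31-bit mode (EA=0, BA=1) cap the address, and
 * EA=1 with BA=0 is an invalid combination.
 */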
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}
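
/*
 * LOAD PSW (LPSW) loads a 64-bit short-format PSW and expands it into the
 * 128-bit format used by the SIE block; handle_lpswe (LPSWE, 0xb2b2)
 * below loads a full 128-bit PSW directly. Both validate the result with
 * is_valid_psw().
 */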
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}
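
/*
 * For STSI 3.2.2 (hypervisor VM description), run the real stsi and then
 * insert KVM as the topmost level-3 hypervisor: existing VM entries are
 * shifted down (capped at 8), and slot 0 is filled with this guest's cpu
 * counts and the "KVMguest"/"KVM/Linux" identifiers, converted to EBCDIC.
 */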
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;	/* cc 3 */
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); /* cc 0 */
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}
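
/*
 * Dispatch table for the 0xb2xx instructions handled in the kernel,
 * indexed by the low byte of the intercepted instruction (IPA).
 * Unpopulated slots fall through to userspace via -EOPNOTSUPP.
 */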
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}
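
/*
 * PERFORM FRAME MANAGEMENT FUNCTION (PFMF, 0xb9af): r1 carries the
 * function bits and storage key, r2 the start address. Only clear-frame
 * (CF) and set-key (SK) are emulated, walking the 4K or 1M block selected
 * by the frame-size code one page at a time; EDAT2 (2G frames) and the
 * conditional-SSKE controls are rejected with a specification exception.
 */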
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    S390_lowcore.stfl_fac_list & 0x00020000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
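
/*
 * LOAD CONTROL (LCTL): load the low 32 bits of control registers r1
 * through r3 (wrapping modulo 16) from consecutive words at the operand
 * address. handle_lctlg below is the 64-bit LCTLG variant (0xeb2f).
 */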
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
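
/*
 * TEST PROTECTION (TPROT, 0xe501): only the Linux memory-detection case
 * (access key 0, guest DAT off) is handled here. The condition code is
 * derived from the host VMA permissions: cc 0 writable, cc 1 read-only,
 * cc 2 no access; an untranslatable address injects PGM_ADDRESSING.
 */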
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}
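
/*
 * SET CLOCK PROGRAMMABLE FIELD (SCKPF, 0x0107): copy bits 48-63 of
 * general register 0 into the TOD programmable register; bits 32-47 must
 * be zero or a specification exception is injected.
 */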
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}