priv.c

/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
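
/*
 * Each handler below emulates one privileged instruction.  A handler
 * returns 0 once the intercept has been dealt with, either by completing
 * the instruction or by injecting a program interruption (specification,
 * addressing or privileged-operation exception) into the guest.  The
 * dispatch functions return -EOPNOTSUPP for instructions that are left to
 * the userspace part of the hypervisor.
 */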

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        /* get the value */
        if (get_guest_u32(vcpu, operand2, &address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest_u32(vcpu, operand2, address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;
        int rc;

        vcpu->stat.instruction_stap++;

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
        return 0;
}
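
/*
 * Storage-key instructions (ISKE, RRBE and SSKE) are not emulated here.
 * The PSW address is rewound by the instruction length (4 bytes), so the
 * guest simply re-executes the instruction once control returns to it.
 */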

static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;
        vcpu->arch.sie_block->gpsw.addr -= 4;
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}
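
/*
 * I/O instructions are not emulated in the kernel; they are all answered
 * with condition code 3.  The condition code lives in bits 18-19 of the
 * PSW, i.e. bits 44-45 (counted from the least significant bit) of the
 * 64-bit gpsw.mask, which is what the shifts by 44 below manipulate.
 */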

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
        return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        unsigned int facility_list;
        int rc;

        vcpu->stat.instruction_stfl++;

        /* only pass the facility bits, which we can handle */
        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           &facility_list, sizeof(facility_list));
        if (rc == -EFAULT)
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        else {
                VCPU_EVENT(vcpu, 5, "store facility list value %x",
                           facility_list);
                trace_kvm_s390_handle_stfl(vcpu, facility_list);
        }
        return 0;
}
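
/*
 * The helpers below are shared by the LPSW and LPSWE handlers:
 * handle_new_psw() delivers pending machine checks once a freshly loaded
 * PSW enables them, PSW_MASK_UNASSIGNED covers the mask bits that must be
 * zero, and PSW_ADDR_24/PSW_ADDR_31 give the highest instruction address
 * that is valid in 24-bit and 31-bit addressing mode.
 */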

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_compat_t new_psw;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        if (!(new_psw.mask & PSW32_MASK_BASE)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask =
                (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_t new_psw;

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
              PSW_MASK_BA) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}
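
/*
 * STORE SYSTEM INFORMATION: function code 0 only asks for the current
 * configuration level (reported as 3, i.e. running under a level-3
 * hypervisor), function codes 1 and 2 are satisfied from the host's own
 * stsi data, and function code 3 with selectors 2.2 returns the VM list
 * with an entry for this KVM guest inserted by handle_stsi_3_2_2().
 */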

static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        u64 operand2;
        unsigned long mem;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff && fc > 0)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 0:
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                return 0;
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_mem;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_fail;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        default:
                goto out_fail;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_mem;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_mem:
        free_page(mem);
out_fail:
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
        return 0;
}
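
/*
 * The handler tables below are indexed by the second opcode byte (the low
 * byte of IPA for the B2, B9 and 01 groups, or of IPB for EB); a NULL
 * entry means the instruction is passed to userspace via -EOPNOTSUPP.
 */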

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. We first check for
         * the privileged ones, that we can handle in the kernel. If the
         * kernel can handle this instruction, we check for the problem
         * state bit and (a) handle the instruction or (b) send a code 2
         * program check.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                        return kvm_s390_inject_program_int(vcpu,
                                                PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        /* extract R1 and R2 of the RRE format instruction from IPB */
        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
        }
        return 0;
}
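
/*
 * EPSW may be executed in problem state, which is why kvm_s390_handle_b9()
 * below skips the privileged-operation check for handle_epsw.
 */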

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if ((handler != handle_epsw) &&
                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
                        return kvm_s390_inject_program_int(vcpu,
                                                PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* All eb instructions that end up here are privileged. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
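
/*
 * TEST PROTECTION sets the condition code according to the host mapping of
 * the tested page: cc 0 if the page is writable, cc 1 if it is read-only,
 * and cc 2 if it is mapped with neither read nor write access.
 */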

static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        /* we must resolve the address without holding the mmap semaphore.
         * This is ok since the userspace hypervisor is not supposed to change
         * the mapping while the guest queries the memory. Otherwise the guest
         * might crash or get wrong info anyway. */
        user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, user_address);
        if (!vma) {
                up_read(&current->mm->mmap_sem);
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}