priv.c

/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
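
/*
 * SET PREFIX: the new prefix is an 8K-aligned area (hence the
 * 0x7fffe000 mask), and both of its 4K pages must map to accessible
 * guest memory before the prefix register is changed.
 */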
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
	return 0;
}
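
/*
 * Storage key instructions (ISKE, RRBE, SSKE) are not emulated here;
 * the guest PSW is simply rewound by the 4-byte instruction length so
 * that the operation is retried on the next execution attempt.
 */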
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
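
/*
 * The condition code lives in PSW bits 18-19, i.e. bits 44-45 of the
 * 64-bit gpsw.mask. STSCH and CHSC are answered with condition code 3
 * ("not operational") since no channel subsystem is provided here.
 */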
static int handle_stsch(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_stsch++;
	VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

static int handle_chsc(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_chsc++;
	VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
		trace_kvm_s390_handle_stfl(vcpu, facility_list);
	}
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x00000000000fffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
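
/*
 * LPSW loads a 64-bit (ESA/390 format) PSW; the mask half is widened to
 * the z/Architecture format before it is installed. The new PSW is
 * rejected with a specification exception if any unassigned mask bit is
 * set, if the instruction address does not fit the selected addressing
 * mode, or if only the EA bit of the addressing mode is set.
 */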
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_compat_t new_psw;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	if (!(new_psw.mask & PSW32_MASK_BASE)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask =
		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_t new_psw;

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	      PSW_MASK_BA) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}
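
/*
 * STSI 3.2.2 (VM-level CPU and hypervisor information): count the
 * created VCPUs, ask the host for the data of any other level-3
 * hypervisors, then shift those entries down and insert KVM itself as
 * the topmost virtual machine entry ("KVMguest" running "KVM/Linux").
 */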
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
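
/*
 * STORE SYSTEM INFORMATION: the function code is taken from bits 32-35
 * of gr0, selector 1 from gr0 and selector 2 from gr1. Function code 0
 * just reports the highest supported function code (3) with CC0;
 * codes 1-3 copy a page of data to the second operand and set CC0,
 * anything else yields CC3.
 */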
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}
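
/*
 * Dispatch table for B2xx instructions, indexed by the second opcode
 * byte (the low byte of the intercepted IPA field). Unlisted opcodes
 * are handed to userspace via -EOPNOTSUPP.
 */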
static const intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones, that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}
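
/*
 * EXTRACT PSW is not a privileged instruction, so kvm_s390_handle_b9()
 * dispatches it without the problem-state check. R1 and R2 sit in the
 * IPB field (bits 20-23 and 16-19 respectively).
 */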
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}
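
/*
 * TEST PROTECTION condition codes mirror the host VMA permissions:
 * CC0 if fetch and store are permitted, CC1 if only fetch is, CC2 if
 * neither is.
 */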
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	/* we must resolve the address without holding the mmap semaphore.
	 * This is ok since the userspace hypervisor is not supposed to change
	 * the mapping while the guest queries the memory. Otherwise the guest
	 * might crash or get wrong info anyway. */
	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_address);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}
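
/*
 * SET CLOCK PROGRAMMABLE FIELD copies the low 16 bits of gr0 into the
 * TOD programmable register; bits 32-47 of gr0 must be zero, otherwise
 * a specification exception is raised.
 */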
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}