sigp.c 10.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402
  1. /*
  2. * handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008, 2009
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include <asm/sigp.h>
  18. #include "gaccess.h"
  19. #include "kvm-s390.h"
  20. #include "trace.h"
  21. static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
  22. u64 *reg)
  23. {
  24. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  25. int rc;
  26. if (cpu_addr >= KVM_MAX_VCPUS)
  27. return SIGP_CC_NOT_OPERATIONAL;
  28. spin_lock(&fi->lock);
  29. if (fi->local_int[cpu_addr] == NULL)
  30. rc = SIGP_CC_NOT_OPERATIONAL;
  31. else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
  32. & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
  33. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  34. else {
  35. *reg &= 0xffffffff00000000UL;
  36. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  37. & CPUSTAT_ECALL_PEND)
  38. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  39. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  40. & CPUSTAT_STOPPED)
  41. *reg |= SIGP_STATUS_STOPPED;
  42. rc = SIGP_CC_STATUS_STORED;
  43. }
  44. spin_unlock(&fi->lock);
  45. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
  46. return rc;
  47. }
  48. static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
  49. {
  50. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  51. struct kvm_s390_local_interrupt *li;
  52. struct kvm_s390_interrupt_info *inti;
  53. int rc;
  54. if (cpu_addr >= KVM_MAX_VCPUS)
  55. return SIGP_CC_NOT_OPERATIONAL;
  56. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  57. if (!inti)
  58. return -ENOMEM;
  59. inti->type = KVM_S390_INT_EMERGENCY;
  60. inti->emerg.code = vcpu->vcpu_id;
  61. spin_lock(&fi->lock);
  62. li = fi->local_int[cpu_addr];
  63. if (li == NULL) {
  64. rc = SIGP_CC_NOT_OPERATIONAL;
  65. kfree(inti);
  66. goto unlock;
  67. }
  68. spin_lock_bh(&li->lock);
  69. list_add_tail(&inti->list, &li->list);
  70. atomic_set(&li->active, 1);
  71. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  72. if (waitqueue_active(&li->wq))
  73. wake_up_interruptible(&li->wq);
  74. spin_unlock_bh(&li->lock);
  75. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  76. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
  77. unlock:
  78. spin_unlock(&fi->lock);
  79. return rc;
  80. }
  81. static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
  82. {
  83. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  84. struct kvm_s390_local_interrupt *li;
  85. struct kvm_s390_interrupt_info *inti;
  86. int rc;
  87. if (cpu_addr >= KVM_MAX_VCPUS)
  88. return SIGP_CC_NOT_OPERATIONAL;
  89. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  90. if (!inti)
  91. return -ENOMEM;
  92. inti->type = KVM_S390_INT_EXTERNAL_CALL;
  93. inti->extcall.code = vcpu->vcpu_id;
  94. spin_lock(&fi->lock);
  95. li = fi->local_int[cpu_addr];
  96. if (li == NULL) {
  97. rc = SIGP_CC_NOT_OPERATIONAL;
  98. kfree(inti);
  99. goto unlock;
  100. }
  101. spin_lock_bh(&li->lock);
  102. list_add_tail(&inti->list, &li->list);
  103. atomic_set(&li->active, 1);
  104. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  105. if (waitqueue_active(&li->wq))
  106. wake_up_interruptible(&li->wq);
  107. spin_unlock_bh(&li->lock);
  108. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  109. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
  110. unlock:
  111. spin_unlock(&fi->lock);
  112. return rc;
  113. }
  114. static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
  115. {
  116. struct kvm_s390_interrupt_info *inti;
  117. inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
  118. if (!inti)
  119. return -ENOMEM;
  120. inti->type = KVM_S390_SIGP_STOP;
  121. spin_lock_bh(&li->lock);
  122. if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
  123. goto out;
  124. list_add_tail(&inti->list, &li->list);
  125. atomic_set(&li->active, 1);
  126. atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
  127. li->action_bits |= action;
  128. if (waitqueue_active(&li->wq))
  129. wake_up_interruptible(&li->wq);
  130. out:
  131. spin_unlock_bh(&li->lock);
  132. return SIGP_CC_ORDER_CODE_ACCEPTED;
  133. }
  134. static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
  135. {
  136. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  137. struct kvm_s390_local_interrupt *li;
  138. int rc;
  139. if (cpu_addr >= KVM_MAX_VCPUS)
  140. return SIGP_CC_NOT_OPERATIONAL;
  141. spin_lock(&fi->lock);
  142. li = fi->local_int[cpu_addr];
  143. if (li == NULL) {
  144. rc = SIGP_CC_NOT_OPERATIONAL;
  145. goto unlock;
  146. }
  147. rc = __inject_sigp_stop(li, action);
  148. unlock:
  149. spin_unlock(&fi->lock);
  150. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
  151. return rc;
  152. }
  153. int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
  154. {
  155. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  156. return __inject_sigp_stop(li, action);
  157. }
  158. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
  159. {
  160. int rc;
  161. switch (parameter & 0xff) {
  162. case 0:
  163. rc = SIGP_CC_NOT_OPERATIONAL;
  164. break;
  165. case 1:
  166. case 2:
  167. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  168. break;
  169. default:
  170. rc = -EOPNOTSUPP;
  171. }
  172. return rc;
  173. }
/*
 * Handle SIGP SET PREFIX: queue a set-prefix interrupt for the
 * addressed CPU after validating the new prefix area.
 *
 * Lock ordering: fi->lock is taken first, then li->lock (bh) nested
 * inside it; both error paths must free the preallocated inti.
 * Status bits are stored in the low word of *reg when the order fails.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	/* probe one byte in each of the two 4k pages of the prefix area */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	/* allocate before taking locks; report busy rather than -ENOMEM */
	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		/* addressed CPU does not exist */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	/* queue the set-prefix interrupt and wake the target CPU */
	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
  226. static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
  227. u64 *reg)
  228. {
  229. int rc;
  230. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  231. if (cpu_addr >= KVM_MAX_VCPUS)
  232. return SIGP_CC_NOT_OPERATIONAL;
  233. spin_lock(&fi->lock);
  234. if (fi->local_int[cpu_addr] == NULL)
  235. rc = SIGP_CC_NOT_OPERATIONAL;
  236. else {
  237. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  238. & CPUSTAT_RUNNING) {
  239. /* running */
  240. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  241. } else {
  242. /* not running */
  243. *reg &= 0xffffffff00000000UL;
  244. *reg |= SIGP_STATUS_NOT_RUNNING;
  245. rc = SIGP_CC_STATUS_STORED;
  246. }
  247. }
  248. spin_unlock(&fi->lock);
  249. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
  250. rc);
  251. return rc;
  252. }
  253. static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
  254. {
  255. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  256. struct kvm_s390_local_interrupt *li;
  257. int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  258. if (cpu_addr >= KVM_MAX_VCPUS)
  259. return SIGP_CC_NOT_OPERATIONAL;
  260. spin_lock(&fi->lock);
  261. li = fi->local_int[cpu_addr];
  262. if (li == NULL) {
  263. rc = SIGP_CC_NOT_OPERATIONAL;
  264. goto out;
  265. }
  266. spin_lock_bh(&li->lock);
  267. if (li->action_bits & ACTION_STOP_ON_STOP)
  268. rc = SIGP_CC_BUSY;
  269. else
  270. VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
  271. cpu_addr);
  272. spin_unlock_bh(&li->lock);
  273. out:
  274. spin_unlock(&fi->lock);
  275. return rc;
  276. }
/*
 * Intercept handler for the SIGP instruction: decode the order code,
 * CPU address and parameter from the SIE block and guest registers,
 * dispatch to the per-order helper, and store the resulting condition
 * code into the guest PSW.
 *
 * Returns 0 on success, a negative error, or -EOPNOTSUPP for orders
 * that must be completed by userspace (including RESTART).
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* RS-format instruction: r1/r3 from IPA, base2/disp2 from IPB */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	/* effective address of the second operand is the order code */
	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	/* the parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
					 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == SIGP_CC_BUSY)
			break;
		/* user space must know about restart */
		/* deliberate fallthrough into the default exit */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* write the SIGP condition code into PSW bits 18-19 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}