sigp.c

/*
 * sigp.c - handling interprocessor communication
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include "gaccess.h"
#include "kvm-s390.h"
/* sigp order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12

/* cpu status bits */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
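
/*
 * SIGP SENSE: report the status of the addressed CPU in the low word of
 * the register designated by r1. Returns cc 1 (status stored) or cc 3
 * (not operational).
 */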
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			unsigned long *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock_bh(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		 & CPUSTAT_RUNNING) {
		*reg &= 0xffffffff00000000UL;
		rc = 1; /* status stored */
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_STOPPED;
		rc = 1; /* status stored */
	}
	spin_unlock_bh(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}
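
/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt on the
 * target CPU's local interrupt list and wake the target if it is waiting.
 * Returns cc 0 (order accepted) or cc 3 (not operational).
 */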
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;

	spin_lock_bh(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
unlock:
	spin_unlock_bh(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
	return rc;
}
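
/*
 * SIGP STOP / STOP AND STORE STATUS: request that the target CPU stop;
 * if 'store' is set, also request that its status be stored on stop.
 */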
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	if (store)
		li->action_bits |= ACTION_STORE_ON_STOP;
	li->action_bits |= ACTION_STOP_ON_STOP;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
unlock:
	spin_unlock_bh(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}
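
/*
 * SIGP SET ARCHITECTURE: mode 0 is reported as not operational, modes 1
 * and 2 are accepted without further action, any other mode is unsupported.
 */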
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = 3; /* not operational */
		break;
	case 1:
	case 2:
		rc = 0; /* order accepted */
		break;
	default:
		rc = -ENOTSUPP;
	}
	return rc;
}
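
/*
 * SIGP SET PREFIX: check that the requested prefix area is backed by guest
 * memory, then queue a set-prefix interrupt for the (stopped) target CPU.
 */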
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     unsigned long *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if ((copy_from_guest(vcpu, &tmp,
		(u64) (address + vcpu->kvm->arch.guest_origin), 1)) ||
	    (copy_from_guest(vcpu, &tmp, (u64) (address +
		vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
		*reg |= SIGP_STAT_INVALID_PARAMETER;
		return 1; /* invalid parameter */
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return 2; /* busy */

	spin_lock_bh(&fi->lock);
	li = fi->local_int[cpu_addr];

	if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
		rc = 1; /* incorrect state */
		*reg &= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		rc = 1; /* incorrect state */
		*reg &= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = 0; /* order accepted */

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock_bh(&fi->lock);
	return rc;
}
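
/*
 * Entry point for SIGP intercepts: decode the order code and parameter
 * from the intercepted instruction, dispatch to the handler for that
 * order, and fold the resulting condition code into the guest PSW.
 */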
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->arch.guest_gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	order_code = disp2;
	if (base2)
		order_code += vcpu->arch.guest_gprs[base2];

	if (r1 % 2)
		parameter = vcpu->arch.guest_gprs[r1];
	else
		parameter = vcpu->arch.guest_gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->arch.guest_gprs[r1]);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, 0);
		break;
	case SIGP_STOP_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, 1);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->arch.guest_gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		/* user space must know about restart */
	default:
		return -ENOTSUPP;
	}

	if (rc < 0)
		return rc;

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}