book3s_pr_papr.c

/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16		/* bytes per HPT entry */
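
/*
 * Translate a guest PTE group index into the userland address of that
 * PTEG.  SDR1 holds the hashed page table's base address in its upper
 * bits (masked 256KB-aligned here) and an encoding of the table size
 * in its low 5 bits; the size field bounds which index bits may select
 * a PTEG.  Each HPTE is 16 bytes and each PTEG holds 8 HPTEs (128
 * bytes), so pte_index << 4 yields a byte offset, 0x70 keeps the
 * slot-within-group offset, and the remaining masked bits select the
 * group.
 */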
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;
	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}
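
/*
 * H_ENTER: insert an HPTE (new V/R words in r6/r7) into the PTEG named
 * by r5.  With H_EXACT the low three bits of the index pick the slot;
 * otherwise any invalid slot in the group will do.  The slot actually
 * used is returned to the guest in r4.  In PR KVM the guest's hashed
 * page table lives in userland memory, hence the copy_{from,to}_user
 * accesses serialized by hpt_mutex.
 */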
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long pteg[2 * 8];
	unsigned long pteg_addr, i, *hpte;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
		goto done;
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((*hpte & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		hpte += i * 2;
		if (*hpte & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = kvmppc_get_gpr(vcpu, 6);
	hpte[1] = kvmppc_get_gpr(vcpu, 7);
	pteg_addr += i * HPTE_SIZE;
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
		goto done;
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
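
/*
 * H_REMOVE: invalidate the HPTE at index r5 and flush it from the TLB.
 * H_AVPN requires the entry's AVPN bits to match r6, and H_ANDCOND
 * requires (V-word & r6) == 0 before the entry may be removed.  The
 * old V and R words are returned in r4/r5.
 */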
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
		goto done;
	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE		0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST		0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE	0x8000000000000000ULL
#define   H_BULK_REMOVE_END		0xc000000000000000ULL
#define H_BULK_REMOVE_CODE		0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS		0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND	0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM		0x2000000000000000ULL
#define   H_BULK_REMOVE_HW		0x3000000000000000ULL
#define H_BULK_REMOVE_RC		0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS		0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE	0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND		0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN		0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX		0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH		4
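
/*
 * H_BULK_REMOVE: process up to four (tsh, tsl) request pairs starting
 * at r4.  Each tsh packs a request type, AVPN/ANDCOND flags and a PTE
 * index; tsl carries the AVPN value.  A response code is merged back
 * into each tsh register, and the loop stops at an END marker or at
 * the first malformed request.
 */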
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
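
/*
 * H_PROTECT: rewrite the pp, N and storage-key bits of the HPTE at
 * index r5 from the flags in r4 (subject to an optional H_AVPN match
 * against r6), then flush the stale translation from the TLB.
 */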
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
		goto done;
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
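
/*
 * H_PUT_TCE: hand the TCE update over to the generic helper; if the
 * helper says it is too hard to handle in the kernel, fail emulation
 * so the hypercall is forwarded to userspace instead.
 */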
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
	long rc = kvmppc_xics_hcall(vcpu, cmd);
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}
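
/*
 * Top-level PAPR hypercall dispatch for PR KVM.  Returns EMULATE_DONE
 * when the call was handled here and EMULATE_FAIL to let the caller
 * forward the hypercall to userspace.
 */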
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_CEDE:
		/* Yield the vcpu: enable external interrupts and block */
		vcpu->arch.shared->msr |= MSR_EE;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;
		if (kvmppc_rtas_hcall(vcpu))
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}