book3s_hv_rm_mmu.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

#define HPTE_V_HVLOCK	0x40UL
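
/*
 * Atomically set HPTE_V_HVLOCK in *hpte, provided none of the bits in
 * 'bits' (HPTE_V_HVLOCK, optionally together with HPTE_V_VALID) are
 * already set.  Returns nonzero if the lock was acquired.
 */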
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%3\n"	/* stdcx. failed: force 'old' nonzero */
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
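
/*
 * Real-mode handler for the H_ENTER hypercall: validate the proposed
 * HPTE (page size, guest real address, WIMG bits), translate the guest
 * real address to a host real address, and install the entry either at
 * the exact slot (H_EXACT) or in the first free slot of the PTEG.
 * The index actually used is returned to the guest in GPR4.
 */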
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	/* translate the guest logical page number to a host real address */
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;	/* clear software bits, including HPTE_V_HVLOCK */
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		/* find and lock the first free slot in the PTEG */
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	/*
	 * Write the second dword first; the store of pteh then publishes
	 * the new entry and drops HPTE_V_HVLOCK in a single operation.
	 */
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
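
/*
 * Try to acquire the global tlbie lock using this CPU's lock token
 * from the paca, so that only one CPU issues tlbie at a time.
 * Returns nonzero on success; the holder releases the lock by
 * storing 0 to it.
 */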
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
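
/*
 * Real-mode handler for the H_REMOVE hypercall: lock the HPTE, check
 * it against the H_AVPN/H_ANDCOND criteria, clear it, and invalidate
 * the corresponding TLB entry (tlbiel if this is the only vCPU,
 * otherwise a global tlbie under tlbie_lock).  The old HPTE contents
 * are returned in GPR4 and GPR5.
 */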
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	/* with only one vCPU online, tlbiel on this CPU suffices */
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
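
/*
 * Real-mode handler for the H_BULK_REMOVE hypercall: process up to
 * four (control, match-value) pairs taken from GPR4-GPR11.  The top
 * two bits of each control word give the request type (1 = request,
 * 3 = end of list) and the low two bits of the top byte the match
 * mode (0 = absolute, 1 = andcond, 2 = AVPN).  On return, the top
 * byte is replaced with a status code (0x80 = success, 0x90 = not
 * found, 0xa0 = parameter error) ORed with the match mode and, on
 * success, the R and C bits of the removed HPTE.  All TLB
 * invalidations are batched into a single tlbie/tlbiel sequence.
 */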
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		/* decode the control word: request type and match mode */
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	/* flush all the batched invalidations in one sequence */
	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
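
/*
 * Real-mode handler for the H_PROTECT hypercall: update the page
 * protection (PP0/PP), no-execute and storage key bits of an existing
 * HPTE.  The entry is made invalid and the TLB entry flushed before
 * the modified HPTE is made visible again.
 */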
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	/* replace the PP, N and key bits with those from flags */
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	/* invalidate the entry while it is being updated */
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;	/* revalidate and unlock */
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
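
/*
 * Translate a host real address back to a guest logical address by
 * searching the ram_pginfo array for the matching host page frame.
 * Returns all 1s in the RPN field if the address does not belong to
 * guest memory.
 */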
static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
	long int i;
	unsigned long offset, rpn;

	offset = realaddr & (kvm->arch.ram_psize - 1);
	rpn = (realaddr - offset) >> PAGE_SHIFT;
	for (i = 0; i < kvm->arch.ram_npages; ++i)
		if (rpn == kvm->arch.ram_pginfo[i].pfn)
			return (i << PAGE_SHIFT) + offset;
	return HPTE_R_RPN;	/* all 1s in the RPN field */
}
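
/*
 * Real-mode handler for the H_READ hypercall: return the contents of
 * one HPTE (or four, if H_READ_4 is set) in GPR4 and up, converting
 * real addresses back to guest logical addresses when H_R_XLATE is
 * requested and the entry is valid.
 */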
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}