/* arch/powerpc/kvm/44x_tlb.c — KVM shadow-TLB management for PowerPC 440 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
  19. #include <linux/types.h>
  20. #include <linux/string.h>
  21. #include <linux/kvm.h>
  22. #include <linux/kvm_host.h>
  23. #include <linux/highmem.h>
  24. #include <asm/mmu-44x.h>
  25. #include <asm/kvm_ppc.h>
  26. #include "44x_tlb.h"
  27. #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
  28. #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
  29. static unsigned int kvmppc_tlb_44x_pos;
  30. static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
  31. {
  32. /* Mask off reserved bits. */
  33. attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
  34. if (!usermode) {
  35. /* Guest is in supervisor mode, so we need to translate guest
  36. * supervisor permissions into user permissions. */
  37. attrib &= ~PPC44x_TLB_USER_PERM_MASK;
  38. attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
  39. }
  40. /* Make sure host can always access this memory. */
  41. attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
  42. return attrib;
  43. }
  44. /* Search the guest TLB for a matching entry. */
  45. int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
  46. unsigned int as)
  47. {
  48. int i;
  49. /* XXX Replace loop with fancy data structures. */
  50. for (i = 0; i < PPC44x_TLB_SIZE; i++) {
  51. struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
  52. unsigned int tid;
  53. if (eaddr < get_tlb_eaddr(tlbe))
  54. continue;
  55. if (eaddr > get_tlb_end(tlbe))
  56. continue;
  57. tid = get_tlb_tid(tlbe);
  58. if (tid && (tid != pid))
  59. continue;
  60. if (!get_tlb_v(tlbe))
  61. continue;
  62. if (get_tlb_ts(tlbe) != as)
  63. continue;
  64. return i;
  65. }
  66. return -1;
  67. }
  68. struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
  69. {
  70. unsigned int as = !!(vcpu->arch.msr & MSR_IS);
  71. unsigned int index;
  72. index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
  73. if (index == -1)
  74. return NULL;
  75. return &vcpu->arch.guest_tlb[index];
  76. }
  77. struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
  78. {
  79. unsigned int as = !!(vcpu->arch.msr & MSR_DS);
  80. unsigned int index;
  81. index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
  82. if (index == -1)
  83. return NULL;
  84. return &vcpu->arch.guest_tlb[index];
  85. }
  86. static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
  87. {
  88. return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
  89. }
  90. static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
  91. unsigned int index)
  92. {
  93. struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
  94. struct page *page = vcpu->arch.shadow_pages[index];
  95. if (get_tlb_v(stlbe)) {
  96. if (kvmppc_44x_tlbe_is_writable(stlbe))
  97. kvm_release_page_dirty(page);
  98. else
  99. kvm_release_page_clean(page);
  100. }
  101. }
  102. void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
  103. {
  104. vcpu->arch.shadow_tlb_mod[i] = 1;
  105. }
  106. /* Caller must ensure that the specified guest TLB entry is safe to insert into
  107. * the shadow TLB. */
  108. void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
  109. u32 flags)
  110. {
  111. struct page *new_page;
  112. struct tlbe *stlbe;
  113. hpa_t hpaddr;
  114. unsigned int victim;
  115. /* Future optimization: don't overwrite the TLB entry containing the
  116. * current PC (or stack?). */
  117. victim = kvmppc_tlb_44x_pos++;
  118. if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
  119. kvmppc_tlb_44x_pos = 0;
  120. stlbe = &vcpu->arch.shadow_tlb[victim];
  121. /* Get reference to new page. */
  122. new_page = gfn_to_page(vcpu->kvm, gfn);
  123. if (is_error_page(new_page)) {
  124. printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
  125. kvm_release_page_clean(new_page);
  126. return;
  127. }
  128. hpaddr = page_to_phys(new_page);
  129. /* Drop reference to old page. */
  130. kvmppc_44x_shadow_release(vcpu, victim);
  131. vcpu->arch.shadow_pages[victim] = new_page;
  132. /* XXX Make sure (va, size) doesn't overlap any other
  133. * entries. 440x6 user manual says the result would be
  134. * "undefined." */
  135. /* XXX what about AS? */
  136. stlbe->tid = !(asid & 0xff);
  137. /* Force TS=1 for all guest mappings. */
  138. /* For now we hardcode 4KB mappings, but it will be important to
  139. * use host large pages in the future. */
  140. stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
  141. | PPC44x_TLB_4K;
  142. stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
  143. stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
  144. vcpu->arch.msr & MSR_PR);
  145. kvmppc_tlbe_set_modified(vcpu, victim);
  146. KVMTRACE_5D(STLB_WRITE, vcpu, victim,
  147. stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
  148. handler);
  149. }
  150. void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
  151. gva_t eend, u32 asid)
  152. {
  153. unsigned int pid = !(asid & 0xff);
  154. int i;
  155. /* XXX Replace loop with fancy data structures. */
  156. for (i = 0; i <= tlb_44x_hwater; i++) {
  157. struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
  158. unsigned int tid;
  159. if (!get_tlb_v(stlbe))
  160. continue;
  161. if (eend < get_tlb_eaddr(stlbe))
  162. continue;
  163. if (eaddr > get_tlb_end(stlbe))
  164. continue;
  165. tid = get_tlb_tid(stlbe);
  166. if (tid && (tid != pid))
  167. continue;
  168. kvmppc_44x_shadow_release(vcpu, i);
  169. stlbe->word0 = 0;
  170. kvmppc_tlbe_set_modified(vcpu, i);
  171. KVMTRACE_5D(STLB_INVAL, vcpu, i,
  172. stlbe->tid, stlbe->word0, stlbe->word1,
  173. stlbe->word2, handler);
  174. }
  175. }
  176. /* Invalidate all mappings on the privilege switch after PID has been changed.
  177. * The guest always runs with PID=1, so we must clear the entire TLB when
  178. * switching address spaces. */
  179. void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
  180. {
  181. int i;
  182. if (vcpu->arch.swap_pid) {
  183. /* XXX Replace loop with fancy data structures. */
  184. for (i = 0; i <= tlb_44x_hwater; i++) {
  185. struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
  186. /* Future optimization: clear only userspace mappings. */
  187. kvmppc_44x_shadow_release(vcpu, i);
  188. stlbe->word0 = 0;
  189. kvmppc_tlbe_set_modified(vcpu, i);
  190. KVMTRACE_5D(STLB_INVAL, vcpu, i,
  191. stlbe->tid, stlbe->word0, stlbe->word1,
  192. stlbe->word2, handler);
  193. }
  194. vcpu->arch.swap_pid = 0;
  195. }
  196. vcpu->arch.shadow_pid = !usermode;
  197. }