/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

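/*
 * Helpers that build the two halves of an SLB entry: mk_esid_data()
 * forms the ESID word (effective segment, valid bit and slot index),
 * mk_vsid_data() forms the VSID word (kernel VSID plus protection and
 * page-size flags).  These become the RB/RS operands of slbmte.
 */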
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

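/* Install a bolted kernel SLB entry for 'ea' in the given slot via slbmte. */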
static inline void create_slbe(unsigned long ea, unsigned long flags,
			       unsigned long entry)
{
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

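/*
 * Wipe the SLB with slbia and reinstall the bolted entries.  slbia
 * leaves entry 0 (the bolted linear mapping) alone, so only the
 * vmalloc segment (slot 1) and the kernel stack (slot 2) need to be
 * re-seated here.
 */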
void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
		ksp_esid_data &= ~SLB_ESID_V;

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
		        "r"(ksp_esid_data)
		     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

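/*
 * The assembler SLB miss handlers load their VSID flag / page-size
 * encodings from instructions whose immediate fields start out as zero
 * and are filled in here at boot, once the MMU page sizes are known.
 */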
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

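/*
 * Boot-time SLB setup for this cpu: patch the miss-handler encodings
 * for the configured page sizes and (except on iSeries, where the
 * hypervisor has already done it) install the bolted kernel entries.
 */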
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
	extern unsigned int *slb_miss_user_load_huge;
	unsigned long huge_llp;

	huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);

		DBG("SLB: linear  LLP = %04x\n", linear_llp);
		DBG("SLB: io      LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
		patch_slb_encoding(slb_miss_user_load_huge,
				   SLB_VSID_USER | huge_llp);
		DBG("SLB: huge    LLP = %04x\n", huge_llp);
#endif
	}

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
	{
		unsigned long lflags, vflags;

		lflags = SLB_VSID_KERNEL | linear_llp;
		vflags = SLB_VSID_KERNEL | vmalloc_llp;

		/* Invalidate the entire SLB (even slot 0) & all the ERATS */
		asm volatile("isync":::"memory");
		asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
		asm volatile("isync; slbia; isync":::"memory");
		create_slbe(PAGE_OFFSET, lflags, 0);

		create_slbe(VMALLOC_START, vflags, 1);

		/* We don't bolt the stack for the time being - we're in boot,
		 * so the stack is in the bolted segment.  By the time it goes
		 * elsewhere, we'll call _switch() which will bolt in the new
		 * one. */
		asm volatile("isync":::"memory");
	}
#endif /* CONFIG_PPC_ISERIES */

	get_paca()->stab_rr = SLB_NUM_BOLTED;
}