/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

/* We use an 8K TSB for the whole kernel, which lets us
 * handle about 4MB of modules and vmalloc mappings without
 * incurring many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
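
/* The TSB is direct-mapped: an entry's index is simply the virtual
 * page number masked by the table size, which is always a power of
 * two.
 */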
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}
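
/* A context of ~0UL acts as a wildcard and matches any entry;
 * otherwise the entry's tag must match both the vaddr bits above
 * bit 22 and the hardware context stored in the upper tag bits.
 */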
static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}
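
/* Invalidate the user TSB entries covering each address batched in
 * the mmu_gather.  The low bit of each batched vaddr is used as a
 * flag elsewhere, so it is masked off before hashing.
 */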
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long ctx = ~0UL;
	unsigned long nentries = mm->context.tsb_nentries;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v, nentries)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}
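
/* Compute the TSB register value for this TSB, and the virtual base
 * plus TTE used to map it, caching the results in the mm's context
 * for use at context switch time.
 */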
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
	       _PAGE_CV | _PAGE_P | _PAGE_W);
	tsb_paddr = __pa(mm->context.tsb);

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
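	/* For the 8K case the mapping can alias in the virtually
	 * indexed D-cache, so the virtual base is nudged so that
	 * its colour (bit 13) matches the physical address.
	 */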
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}

	tsb_reg |= base;
	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
	tte |= (tsb_paddr & ~(page_sz - 1UL));

	mm->context.tsb_reg_val = tsb_reg;
	mm->context.tsb_map_vaddr = base;
	mm->context.tsb_map_pte = tte;
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
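/* Note: each entry's tag and pte are fetched below with a single
 * quad load (ldda from ASI_NUCLEUS_QUAD_LDD), so the pair is read
 * as one atomic 128-bit access.
 */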
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v;
		unsigned int hash;

		__asm__ __volatile__(
			"ldda [%2] %3, %0"
			: "=r" (tag), "=r" (pte)
			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));

		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs in there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;

		hash = tsb_hash(v, new_nentries);
		new_tsb[hash].tag = tag;
		new_tsb[hash].pte = pte;
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try to grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
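/* For example, with 16-byte TSB entries an 8K TSB holds 512 of
 * them, so the first grow past that size triggers once the RSS
 * exceeds 384 pages.
 */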
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
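
	/* Pick the smallest power-of-two size whose 3/4-full
	 * threshold still exceeds the current RSS.
	 */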
	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
	if (unlikely(!page))
		return;

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}
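
/* Initialize the context state and allocate the first TSB for a
 * newly created address space.
 */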
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long initial_rss;

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	initial_rss = mm->context.tsb_nentries;
	if (initial_rss)
		initial_rss -= 1;

	tsb_grow(mm, initial_rss, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}
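
/* Free the TSB pages and, if a valid hardware context number is
 * held, release it back to the context allocation bitmap.
 */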
void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}