/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
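
/* The TSB is a direct-mapped, power-of-two sized table, so an entry's
 * slot is simply the virtual page number masked down to the table size.
 * For example, with a 512-entry TSB and 8K pages, vaddr 0x2000 hashes
 * to slot (0x2000 >> 13) & 511 == 1.
 */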
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
        vaddr >>= PAGE_SHIFT;
        return vaddr & (nentries - 1);
}
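
/* A TSB entry's tag encodes bits 22 to 63 of the virtual address in its
 * low 42 bits and the hardware context number starting at bit 48;
 * copy_tsb() below relies on this same layout to reconstruct vaddrs.
 */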
static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
{
        return (tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v, 0)) {
                        ent->tag = 0UL;
                        membar_storeload_storestore();
                }
        }
}
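
/* Flush the addresses batched in the mmu_gather from the user TSB.
 * On cheetah_plus the TSB is referenced by physical address, so
 * tsb_flush() is handed a physical entry address; older chips take a
 * virtual one. The low bit of each batched address is not part of the
 * page address, so it is masked off before hashing.
 */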
void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        struct tsb *tsb = mm->context.tsb;
        unsigned long nentries = mm->context.tsb_nentries;
        unsigned long ctx, base;
        int i;

        if (unlikely(!CTX_VALID(mm->context)))
                return;

        ctx = CTX_HWBITS(mm->context);

        if (tlb_type == cheetah_plus)
                base = __pa(tsb);
        else
                base = (unsigned long) tsb;

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                unsigned long tag, ent, hash;

                v &= ~0x1UL;

                hash = tsb_hash(v, nentries);
                ent = base + (hash * sizeof(struct tsb));
                tag = (v >> 22UL) | (ctx << 48UL);

                tsb_flush(ent, tag);
        }
}
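
/* Compute the TSB register value and, for virtually mapped TSBs, the
 * TLB entry that maps the table. Invoked whenever the TSB is
 * (re)allocated by tsb_grow().
 */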
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

        base = TSBMAP_BASE;
        tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
               _PAGE_CV | _PAGE_P | _PAGE_W);
        tsb_paddr = __pa(mm->context.tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
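                /* The D-cache is virtually indexed, so make the 8K
                 * "color" bit of the virtual base match the TSB's
                 * physical address to avoid illegal aliases.
                 */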
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                tte |= _PAGE_SZ8K;
                page_sz = 8192;
                break;

        case 8192 << 1:
                tsb_reg = 0x1UL;
                tte |= _PAGE_SZ64K;
                page_sz = 64 * 1024;
                break;

        case 8192 << 2:
                tsb_reg = 0x2UL;
                tte |= _PAGE_SZ64K;
                page_sz = 64 * 1024;
                break;

        case 8192 << 3:
                tsb_reg = 0x3UL;
                tte |= _PAGE_SZ64K;
                page_sz = 64 * 1024;
                break;

        case 8192 << 4:
                tsb_reg = 0x4UL;
                tte |= _PAGE_SZ512K;
                page_sz = 512 * 1024;
                break;

        case 8192 << 5:
                tsb_reg = 0x5UL;
                tte |= _PAGE_SZ512K;
                page_sz = 512 * 1024;
                break;

        case 8192 << 6:
                tsb_reg = 0x6UL;
                tte |= _PAGE_SZ512K;
                page_sz = 512 * 1024;
                break;

        case 8192 << 7:
                tsb_reg = 0x7UL;
                tte |= _PAGE_SZ4MB;
                page_sz = 4 * 1024 * 1024;
                break;

        default:
                BUG();
        }

        if (tlb_type == cheetah_plus) {
                /* Physical mapping, no locked TLB entry for TSB. */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = 0;
                mm->context.tsb_map_pte = 0;
        } else {
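                /* Chips without TSB physical addressing access the
                 * table through a virtual mapping at TSBMAP_BASE,
                 * entered into the TLB as a locked entry (_PAGE_L is
                 * set in tte above).
                 */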
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = base;
                mm->context.tsb_map_pte = tte;
        }
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
                     struct tsb *new_tsb, unsigned long new_size)
{
        unsigned long old_nentries = old_size / sizeof(struct tsb);
        unsigned long new_nentries = new_size / sizeof(struct tsb);
        unsigned long i;

        for (i = 0; i < old_nentries; i++) {
                register unsigned long tag asm("o4");
                register unsigned long pte asm("o5");
                unsigned long v, hash;
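
                /* Read tag and pte as one atomic 16-byte quad load.
                 * ldda fills an even/odd register pair, which is why
                 * tag and pte are pinned to %o4/%o5 above.
                 */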
                if (tlb_type == cheetah_plus) {
                        __asm__ __volatile__(
                                "ldda [%2] %3, %0"
                                : "=r" (tag), "=r" (pte)
                                : "r" (__pa(&old_tsb[i])),
                                  "i" (ASI_QUAD_LDD_PHYS));
                } else {
                        __asm__ __volatile__(
                                "ldda [%2] %3, %0"
                                : "=r" (tag), "=r" (pte)
                                : "r" (&old_tsb[i]),
                                  "i" (ASI_NUCLEUS_QUAD_LDD));
                }

                if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
                        continue;

                /* We only put base page size PTEs into the TSB,
                 * but that might change in the future. This code
                 * would need to be changed if we start putting larger
                 * page size PTEs into there.
                 */
                WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

                /* The tag holds bits 22 to 63 of the virtual address
                 * and the context. Clear out the context, and shift
                 * up to make a virtual address.
                 */
                v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

                /* The implied bits of the tag (bits 13 to 21) are
                 * determined by the TSB entry index, so fill that in.
                 */
                v |= (i & (512UL - 1UL)) << 13UL;
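                /* Bits 13 to 21 are nine bits, hence the 512-entry
                 * mask: the smallest (8K) TSB holds 512 entries, so an
                 * entry's index supplies exactly those nine bits.
                 */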
                hash = tsb_hash(v, new_nentries);
                if (tlb_type == cheetah_plus) {
                        __asm__ __volatile__(
                                "stxa %0, [%1] %2\n\t"
                                "stxa %3, [%4] %2"
                                : /* no outputs */
                                : "r" (tag),
                                  "r" (__pa(&new_tsb[hash].tag)),
                                  "i" (ASI_PHYS_USE_EC),
                                  "r" (pte),
                                  "r" (__pa(&new_tsb[hash].pte)));
                } else {
                        new_tsb[hash].tag = tag;
                        new_tsb[hash].pte = pte;
                }
        }
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two. The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once. Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long size, old_size;
        struct page *page;
        struct tsb *old_tsb;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);
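
        /* Pick the smallest power-of-two size whose 3/4 capacity still
         * exceeds the current RSS. For example, rss == 4000 selects a
         * 128K TSB: 8192 entries of 16 bytes, and 3/4 of 8192 is 6144.
         */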
        for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
                unsigned long n_entries = size / sizeof(struct tsb);

                n_entries = (n_entries * 3) / 4;
                if (n_entries > rss)
                        break;
        }

        page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
        if (unlikely(!page))
                return;

        if (size == max_tsb_size)
                mm->context.tsb_rss_limit = ~0UL;
        else
                mm->context.tsb_rss_limit =
                        ((size / sizeof(struct tsb)) * 3) / 4;

        old_tsb = mm->context.tsb;
        old_size = mm->context.tsb_nentries * sizeof(struct tsb);

        if (old_tsb)
                copy_tsb(old_tsb, old_size, page_address(page), size);

        mm->context.tsb = page_address(page);
        setup_tsb_params(mm, size);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Now force all other processors to reload the new
                 * TSB state.
                 */
                smp_tsb_sync(mm);

                /* Finally reload it on the local cpu. No further
                 * references will remain to the old TSB and we can
                 * thus free it up.
                 */
                tsb_context_switch(mm);

                free_pages((unsigned long) old_tsb, get_order(old_size));
        }
}
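
/* Set up the MMU context for a freshly created address space: no
 * hardware context number yet, and an initial TSB sized from the
 * parent's (on fork) or the minimum.
 */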
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long initial_rss;

        mm->context.sparc64_ctx_val = 0UL;

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        mm->context.tsb = NULL;

        /* If this is a fork, inherit the parent's TSB size. We would
         * grow it to that size on the first page fault anyway.
         */
        initial_rss = mm->context.tsb_nentries;
        if (initial_rss)
                initial_rss -= 1;

        tsb_grow(mm, initial_rss, GFP_KERNEL);

        if (unlikely(!mm->context.tsb))
                return -ENOMEM;

        return 0;
}
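
/* Tear down the TSB and, if a hardware context number was ever
 * allocated, give it back to the allocator bitmap.
 */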
void destroy_context(struct mm_struct *mm)
{
        unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

        free_pages((unsigned long) mm->context.tsb, get_order(size));

        /* We can remove these later, but for now it's useful
         * to catch any bogus post-destroy_context() references
         * to the TSB.
         */
        mm->context.tsb = NULL;
        mm->context.tsb_reg_val = 0UL;

        spin_lock(&ctx_alloc_lock);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
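
                /* Clear bit nr in the context bitmap; each word of
                 * mmu_context_bmap tracks 64 context numbers.
                 */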
                mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
        }

        spin_unlock(&ctx_alloc_lock);
}