/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
{
	return (tag == ((vaddr >> 22) | (context << 48)));
}
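
/* A TSB tag, as built above and in flush_tsb_user(): bits 0-41 hold
 * VA bits 22-63 and bits 48-63 hold the hardware context number.
 * VA bits 13-21 are not stored; they are implied by the entry's
 * index within a 512-entry TSB (see copy_tsb() below).
 */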

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long nentries = mm->context.tsb_nentries;
	unsigned long ctx, base;
	int i;

	if (unlikely(!CTX_VALID(mm->context)))
		return;

	ctx = CTX_HWBITS(mm->context);
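
	/* cheetah_plus and hypervisor reference the TSB by physical
	 * address (see setup_tsb_params() below), so hand tsb_flush()
	 * a physical address for them and the kernel-mapped virtual
	 * address otherwise.
	 */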
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		/* Bit 0 of each batched address is used as a flag by
		 * the TLB batching code; mask it off to recover the
		 * page-aligned virtual address.
		 */
		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL) | (ctx << 48UL);

		tsb_flush(ent, tag);
	}
}

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}

	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
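		/* Spitfire/Cheetah: the TSB is referenced by virtual
		 * address, so map it at TSBMAP_BASE with a single
		 * locked TLB entry of size page_sz; the TSB's offset
		 * within that mapping comes from the low bits of its
		 * physical address.
		 */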
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
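		/* Per the sun4v hypervisor API: assoc is the TSB's
		 * associativity (direct-mapped here), num_ttes its
		 * entry count (each struct tsb is a 16-byte TTE),
		 * and ctx_idx selects which MMU context register is
		 * used when forming tags (0 here).
		 */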
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v, hash;

		if (tlb_type == hypervisor) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS_4V));
		} else if (tlb_type == cheetah_plus) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS));
		} else {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (&old_tsb[i]),
				  "i" (ASI_NUCLEUS_QUAD_LDD));
		}

		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs into there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;
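
		/* Only 9 index bits (512 entries' worth) are implied
		 * this way: for TSBs larger than 512 entries, the
		 * higher index bits duplicate VA bits already present
		 * in the tag, hence the 511 mask above.
		 */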
		hash = tsb_hash(v, new_nentries);
		if (tlb_type == cheetah_plus ||
		    tlb_type == hypervisor) {
			__asm__ __volatile__(
				"stxa %0, [%1] %2\n\t"
				"stxa %3, [%4] %2"
				: /* no outputs */
				: "r" (tag),
				  "r" (__pa(&new_tsb[hash].tag)),
				  "i" (ASI_PHYS_USE_EC),
				  "r" (pte),
				  "r" (__pa(&new_tsb[hash].pte)));
		} else {
			new_tsb[hash].tag = tag;
			new_tsb[hash].pte = pte;
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
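/* For example: the initial 8K TSB holds 512 struct tsb entries, so
 * its tsb_rss_limit is 384 pages; once the RSS crosses that we move
 * to a 16K TSB (1024 entries, limit 768), and so on up to the 1MB
 * maximum.
 */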
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
	if (unlikely(!page))
		return;

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;
	tsb_grow(mm, 0, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}